2024-11-15 08:26:08,275 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477
2024-11-15 08:26:08,289 main DEBUG Took 0.011567 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-15 08:26:08,289 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-15 08:26:08,290 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-15 08:26:08,291 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-15 08:26:08,292 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 08:26:08,300 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-15 08:26:08,316 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 08:26:08,318 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 08:26:08,319 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 08:26:08,320 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 08:26:08,320 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 08:26:08,321 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 08:26:08,322 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 08:26:08,322 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 08:26:08,323 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 08:26:08,324 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 08:26:08,325 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 08:26:08,325 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 08:26:08,326 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 08:26:08,326 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 08:26:08,327 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 08:26:08,327 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 08:26:08,328 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 08:26:08,329 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 08:26:08,329 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 08:26:08,330 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 08:26:08,330 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 08:26:08,331 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 08:26:08,331 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 08:26:08,332 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-15 08:26:08,333 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 08:26:08,333 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-15 08:26:08,335 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-15 08:26:08,337 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-15 08:26:08,339 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-15 08:26:08,340 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-15 08:26:08,341 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-15 08:26:08,342 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-15 08:26:08,354 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-15 08:26:08,358 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-15 08:26:08,360 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-15 08:26:08,361 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-15 08:26:08,361 main DEBUG createAppenders(={Console})
2024-11-15 08:26:08,362 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 initialized
2024-11-15 08:26:08,362 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477
2024-11-15 08:26:08,363 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 OK.
2024-11-15 08:26:08,364 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-15 08:26:08,364 main DEBUG OutputStream closed
2024-11-15 08:26:08,365 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-15 08:26:08,365 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-15 08:26:08,365 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@5a56cdac OK
2024-11-15 08:26:08,498 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-15 08:26:08,501 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-15 08:26:08,504 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-15 08:26:08,505 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-15 08:26:08,506 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-15 08:26:08,507 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-15 08:26:08,507 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-15 08:26:08,508 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-15 08:26:08,508 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-15 08:26:08,509 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-15 08:26:08,509 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-15 08:26:08,510 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-15 08:26:08,510 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-15 08:26:08,510 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-15 08:26:08,511 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-15 08:26:08,511 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-15 08:26:08,512 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-15 08:26:08,513 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-15 08:26:08,520 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-15 08:26:08,521 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6f63b475) with optional ClassLoader: null
2024-11-15 08:26:08,522 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-15 08:26:08,523 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6f63b475] started OK.
2024-11-15T08:26:08,542 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins
2024-11-15 08:26:08,545 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-15 08:26:08,545 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-15T08:26:09,148 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a
2024-11-15T08:26:09,150 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins
2024-11-15T08:26:09,211 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform...
using builtin-java classes where applicable 2024-11-15T08:26:09,463 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-15T08:26:09,494 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673, deleteOnExit=true 2024-11-15T08:26:09,494 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-15T08:26:09,495 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/test.cache.data in system properties and HBase conf 2024-11-15T08:26:09,495 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T08:26:09,496 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.log.dir in system properties and HBase conf 2024-11-15T08:26:09,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T08:26:09,497 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T08:26:09,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T08:26:09,613 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-15T08:26:09,620 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T08:26:09,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T08:26:09,628 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T08:26:09,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T08:26:09,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T08:26:09,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T08:26:09,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T08:26:09,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T08:26:09,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T08:26:09,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/nfs.dump.dir in system properties and HBase conf 2024-11-15T08:26:09,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/java.io.tmpdir in system properties and HBase conf 2024-11-15T08:26:09,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T08:26:09,638 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T08:26:09,639 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T08:26:10,709 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-15T08:26:10,820 INFO [Time-limited test {}] log.Log(170): Logging initialized @3806ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-15T08:26:10,943 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T08:26:11,039 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T08:26:11,104 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T08:26:11,104 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T08:26:11,106 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T08:26:11,127 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T08:26:11,131 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74e30e0c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.log.dir/,AVAILABLE} 2024-11-15T08:26:11,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d167fe8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T08:26:11,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a55f3e1{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/java.io.tmpdir/jetty-localhost-44195-hadoop-hdfs-3_4_1-tests_jar-_-any-10156514412922892555/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-15T08:26:11,403 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:44195} 2024-11-15T08:26:11,403 INFO [Time-limited test {}] server.Server(415): Started @4390ms 2024-11-15T08:26:11,883 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T08:26:11,892 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T08:26:11,893 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T08:26:11,894 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T08:26:11,894 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T08:26:11,896 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@134642c1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.log.dir/,AVAILABLE} 2024-11-15T08:26:11,896 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@480cc207{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T08:26:12,049 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1adf9305{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/java.io.tmpdir/jetty-localhost-42601-hadoop-hdfs-3_4_1-tests_jar-_-any-18252198732293396374/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T08:26:12,050 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@36dae46f{HTTP/1.1, (http/1.1)}{localhost:42601} 2024-11-15T08:26:12,051 INFO [Time-limited test {}] server.Server(415): Started @5038ms 2024-11-15T08:26:12,138 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T08:26:12,367 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T08:26:12,383 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T08:26:12,395 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T08:26:12,395 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T08:26:12,395 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T08:26:12,396 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ed86ab3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.log.dir/,AVAILABLE} 2024-11-15T08:26:12,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@246a58e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T08:26:12,562 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@19299b82{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/java.io.tmpdir/jetty-localhost-36651-hadoop-hdfs-3_4_1-tests_jar-_-any-2879322158334079098/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T08:26:12,563 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2bf72d86{HTTP/1.1, (http/1.1)}{localhost:36651} 2024-11-15T08:26:12,564 INFO [Time-limited test {}] server.Server(415): Started @5551ms 2024-11-15T08:26:12,567 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T08:26:12,690 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T08:26:12,697 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T08:26:12,703 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T08:26:12,704 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T08:26:12,704 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T08:26:12,707 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e41eadb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.log.dir/,AVAILABLE} 2024-11-15T08:26:12,707 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@544a54bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-15T08:26:12,846 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data3/current/BP-1366914351-172.17.0.2-1731659170365/current, will proceed with Du for space computation calculation, 2024-11-15T08:26:12,846 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data1/current/BP-1366914351-172.17.0.2-1731659170365/current, will proceed with Du for space computation calculation, 2024-11-15T08:26:12,865 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data4/current/BP-1366914351-172.17.0.2-1731659170365/current, will proceed with Du for space computation calculation, 2024-11-15T08:26:12,866 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data2/current/BP-1366914351-172.17.0.2-1731659170365/current, will proceed with Du for space computation calculation, 2024-11-15T08:26:12,872 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3d3bb473{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/java.io.tmpdir/jetty-localhost-38923-hadoop-hdfs-3_4_1-tests_jar-_-any-12624292082322880104/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-15T08:26:12,873 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@127441c8{HTTP/1.1, 
(http/1.1)}{localhost:38923} 2024-11-15T08:26:12,874 INFO [Time-limited test {}] server.Server(415): Started @5861ms 2024-11-15T08:26:12,878 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-15T08:26:12,963 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T08:26:12,963 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-15T08:26:13,076 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xec5da4ee2d3afe13 with lease ID 0x9fba31096bc8e5bb: Processing first storage report for DS-fcf7e74c-e18f-4d0a-80c4-a96ba0767d05 from datanode DatanodeRegistration(127.0.0.1:38947, datanodeUuid=0a1a9b44-863a-4e7a-8b1e-cbf3e3019079, infoPort=38337, infoSecurePort=0, ipcPort=35191, storageInfo=lv=-57;cid=testClusterID;nsid=1413986334;c=1731659170365) 2024-11-15T08:26:13,078 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xec5da4ee2d3afe13 with lease ID 0x9fba31096bc8e5bb: from storage DS-fcf7e74c-e18f-4d0a-80c4-a96ba0767d05 node DatanodeRegistration(127.0.0.1:38947, datanodeUuid=0a1a9b44-863a-4e7a-8b1e-cbf3e3019079, infoPort=38337, infoSecurePort=0, ipcPort=35191, storageInfo=lv=-57;cid=testClusterID;nsid=1413986334;c=1731659170365), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T08:26:13,078 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd7f2eeb7b6c848a2 with lease ID 0x9fba31096bc8e5bc: Processing first storage report for DS-734a140c-0d60-4e41-a710-e466e0a866d2 from datanode DatanodeRegistration(127.0.0.1:32865, datanodeUuid=205d8336-1109-4287-9974-ecd9ff72979d, infoPort=39297, infoSecurePort=0, ipcPort=45925, storageInfo=lv=-57;cid=testClusterID;nsid=1413986334;c=1731659170365) 2024-11-15T08:26:13,078 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd7f2eeb7b6c848a2 with lease ID 0x9fba31096bc8e5bc: from storage DS-734a140c-0d60-4e41-a710-e466e0a866d2 node DatanodeRegistration(127.0.0.1:32865, datanodeUuid=205d8336-1109-4287-9974-ecd9ff72979d, infoPort=39297, infoSecurePort=0, ipcPort=45925, storageInfo=lv=-57;cid=testClusterID;nsid=1413986334;c=1731659170365), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T08:26:13,079 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xec5da4ee2d3afe13 with lease ID 0x9fba31096bc8e5bb: Processing first storage report for DS-c7d5ba66-120c-4bd3-8fad-cba0b6855a82 from datanode DatanodeRegistration(127.0.0.1:38947, datanodeUuid=0a1a9b44-863a-4e7a-8b1e-cbf3e3019079, infoPort=38337, infoSecurePort=0, ipcPort=35191, storageInfo=lv=-57;cid=testClusterID;nsid=1413986334;c=1731659170365) 2024-11-15T08:26:13,079 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xec5da4ee2d3afe13 with lease ID 0x9fba31096bc8e5bb: from storage DS-c7d5ba66-120c-4bd3-8fad-cba0b6855a82 node DatanodeRegistration(127.0.0.1:38947, datanodeUuid=0a1a9b44-863a-4e7a-8b1e-cbf3e3019079, infoPort=38337, infoSecurePort=0, ipcPort=35191, storageInfo=lv=-57;cid=testClusterID;nsid=1413986334;c=1731659170365), blocks: 0, hasStaleStorage: false, 
processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T08:26:13,079 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd7f2eeb7b6c848a2 with lease ID 0x9fba31096bc8e5bc: Processing first storage report for DS-3374ffa8-024a-46e6-98ae-bba5763cc322 from datanode DatanodeRegistration(127.0.0.1:32865, datanodeUuid=205d8336-1109-4287-9974-ecd9ff72979d, infoPort=39297, infoSecurePort=0, ipcPort=45925, storageInfo=lv=-57;cid=testClusterID;nsid=1413986334;c=1731659170365) 2024-11-15T08:26:13,080 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd7f2eeb7b6c848a2 with lease ID 0x9fba31096bc8e5bc: from storage DS-3374ffa8-024a-46e6-98ae-bba5763cc322 node DatanodeRegistration(127.0.0.1:32865, datanodeUuid=205d8336-1109-4287-9974-ecd9ff72979d, infoPort=39297, infoSecurePort=0, ipcPort=45925, storageInfo=lv=-57;cid=testClusterID;nsid=1413986334;c=1731659170365), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T08:26:13,132 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data5/current/BP-1366914351-172.17.0.2-1731659170365/current, will proceed with Du for space computation calculation, 2024-11-15T08:26:13,141 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data6/current/BP-1366914351-172.17.0.2-1731659170365/current, will proceed with Du for space computation calculation, 2024-11-15T08:26:13,196 WARN [Thread-123 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-15T08:26:13,206 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf649c5161702ec8c with lease ID 0x9fba31096bc8e5bd: Processing first storage report for DS-a0f7502b-4b1a-4860-bf80-256464e6d99e from datanode DatanodeRegistration(127.0.0.1:37633, datanodeUuid=759e3899-d572-409a-8cbc-d6e36be1dc36, infoPort=36349, infoSecurePort=0, ipcPort=46235, storageInfo=lv=-57;cid=testClusterID;nsid=1413986334;c=1731659170365) 2024-11-15T08:26:13,207 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf649c5161702ec8c with lease ID 0x9fba31096bc8e5bd: from storage DS-a0f7502b-4b1a-4860-bf80-256464e6d99e node DatanodeRegistration(127.0.0.1:37633, datanodeUuid=759e3899-d572-409a-8cbc-d6e36be1dc36, infoPort=36349, infoSecurePort=0, ipcPort=46235, storageInfo=lv=-57;cid=testClusterID;nsid=1413986334;c=1731659170365), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-15T08:26:13,207 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf649c5161702ec8c with lease ID 0x9fba31096bc8e5bd: Processing first storage report for DS-10cbb8e8-f09c-4379-8da1-263939d5aa23 from datanode DatanodeRegistration(127.0.0.1:37633, datanodeUuid=759e3899-d572-409a-8cbc-d6e36be1dc36, infoPort=36349, infoSecurePort=0, ipcPort=46235, storageInfo=lv=-57;cid=testClusterID;nsid=1413986334;c=1731659170365) 2024-11-15T08:26:13,207 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf649c5161702ec8c with lease ID 0x9fba31096bc8e5bd: from storage DS-10cbb8e8-f09c-4379-8da1-263939d5aa23 node DatanodeRegistration(127.0.0.1:37633, datanodeUuid=759e3899-d572-409a-8cbc-d6e36be1dc36, infoPort=36349, infoSecurePort=0, ipcPort=46235, storageInfo=lv=-57;cid=testClusterID;nsid=1413986334;c=1731659170365), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-15T08:26:13,480 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a 2024-11-15T08:26:13,570 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/zookeeper_0, clientPort=59481, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-15T08:26:13,592 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59481 2024-11-15T08:26:13,610 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
2024-11-15T08:26:13,614 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T08:26:13,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741825_1001 (size=7) 2024-11-15T08:26:13,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741825_1001 (size=7) 2024-11-15T08:26:13,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741825_1001 (size=7) 2024-11-15T08:26:14,003 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 with version=8 2024-11-15T08:26:14,003 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/hbase-staging 2024-11-15T08:26:14,144 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-15T08:26:14,416 INFO [Time-limited test {}] client.ConnectionUtils(128): master/48e8ad8dc936:0 server-side Connection retries=45 2024-11-15T08:26:14,428 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T08:26:14,428 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T08:26:14,434 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T08:26:14,435 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T08:26:14,435 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T08:26:14,632 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-15T08:26:14,717 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-15T08:26:14,731 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-15T08:26:14,736 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T08:26:14,776 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 32272 (auto-detected) 2024-11-15T08:26:14,777 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-15T08:26:14,805 INFO [Time-limited test {}] 
ipc.NettyRpcServer(191): Bind to /172.17.0.2:35169 2024-11-15T08:26:14,839 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35169 connecting to ZooKeeper ensemble=127.0.0.1:59481 2024-11-15T08:26:14,883 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:351690x0, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T08:26:14,886 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35169-0x1011abbf93d0000 connected 2024-11-15T08:26:14,927 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T08:26:14,931 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T08:26:14,950 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T08:26:14,956 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1, hbase.cluster.distributed=false 2024-11-15T08:26:15,002 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T08:26:15,009 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35169 2024-11-15T08:26:15,012 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35169 2024-11-15T08:26:15,016 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35169 2024-11-15T08:26:15,017 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35169 2024-11-15T08:26:15,017 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35169 2024-11-15T08:26:15,164 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/48e8ad8dc936:0 server-side Connection retries=45 2024-11-15T08:26:15,166 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T08:26:15,167 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T08:26:15,167 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T08:26:15,167 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T08:26:15,168 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T08:26:15,172 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T08:26:15,175 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T08:26:15,176 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37511 2024-11-15T08:26:15,179 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37511 connecting to ZooKeeper ensemble=127.0.0.1:59481 2024-11-15T08:26:15,180 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T08:26:15,185 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T08:26:15,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:375110x0, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T08:26:15,198 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:375110x0, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T08:26:15,203 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T08:26:15,207 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37511-0x1011abbf93d0001 connected 2024-11-15T08:26:15,214 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T08:26:15,217 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T08:26:15,227 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T08:26:15,238 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37511 2024-11-15T08:26:15,239 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37511 2024-11-15T08:26:15,241 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37511 2024-11-15T08:26:15,243 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37511 2024-11-15T08:26:15,245 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37511 2024-11-15T08:26:15,270 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/48e8ad8dc936:0 server-side Connection retries=45 
2024-11-15T08:26:15,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T08:26:15,270 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T08:26:15,271 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T08:26:15,271 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T08:26:15,271 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T08:26:15,272 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T08:26:15,272 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T08:26:15,273 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40637 2024-11-15T08:26:15,275 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40637 connecting to ZooKeeper ensemble=127.0.0.1:59481 2024-11-15T08:26:15,277 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T08:26:15,280 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T08:26:15,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:406370x0, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T08:26:15,294 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:406370x0, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T08:26:15,295 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T08:26:15,296 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40637-0x1011abbf93d0002 connected 2024-11-15T08:26:15,297 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T08:26:15,299 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T08:26:15,302 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T08:26:15,307 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40637 2024-11-15T08:26:15,307 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40637 2024-11-15T08:26:15,309 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40637 2024-11-15T08:26:15,314 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40637 2024-11-15T08:26:15,316 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40637 2024-11-15T08:26:15,345 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/48e8ad8dc936:0 server-side Connection retries=45 2024-11-15T08:26:15,345 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T08:26:15,345 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-15T08:26:15,345 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-15T08:26:15,346 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-15T08:26:15,346 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-15T08:26:15,346 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-15T08:26:15,346 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-15T08:26:15,348 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34555 2024-11-15T08:26:15,351 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34555 connecting to ZooKeeper ensemble=127.0.0.1:59481 2024-11-15T08:26:15,353 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T08:26:15,355 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T08:26:15,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:345550x0, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-15T08:26:15,364 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:345550x0, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on znode that does not yet exist, 
/hbase/running 2024-11-15T08:26:15,365 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-15T08:26:15,369 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34555-0x1011abbf93d0003 connected 2024-11-15T08:26:15,369 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-15T08:26:15,371 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-15T08:26:15,373 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-15T08:26:15,378 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34555 2024-11-15T08:26:15,378 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34555 2024-11-15T08:26:15,379 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34555 2024-11-15T08:26:15,381 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34555 2024-11-15T08:26:15,388 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34555 2024-11-15T08:26:15,408 DEBUG [M:0;48e8ad8dc936:35169 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;48e8ad8dc936:35169 2024-11-15T08:26:15,413 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/48e8ad8dc936,35169,1731659174197 2024-11-15T08:26:15,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T08:26:15,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T08:26:15,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T08:26:15,424 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T08:26:15,426 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/48e8ad8dc936,35169,1731659174197 2024-11-15T08:26:15,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T08:26:15,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T08:26:15,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:15,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:15,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-15T08:26:15,454 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:15,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:15,458 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-15T08:26:15,459 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/48e8ad8dc936,35169,1731659174197 from backup master directory 2024-11-15T08:26:15,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/48e8ad8dc936,35169,1731659174197 2024-11-15T08:26:15,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T08:26:15,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T08:26:15,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T08:26:15,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-15T08:26:15,468 WARN [master/48e8ad8dc936:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be 
cleared on crash by start scripts (Longer MTTR!) 2024-11-15T08:26:15,469 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=48e8ad8dc936,35169,1731659174197 2024-11-15T08:26:15,471 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-15T08:26:15,473 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-15T08:26:15,537 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/hbase.id] with ID: ba5ab714-38eb-44a8-a856-45e1f3ffcc35 2024-11-15T08:26:15,538 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.tmp/hbase.id 2024-11-15T08:26:15,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741826_1002 (size=42) 2024-11-15T08:26:15,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741826_1002 (size=42) 2024-11-15T08:26:15,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741826_1002 (size=42) 2024-11-15T08:26:15,567 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.tmp/hbase.id]:[hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/hbase.id] 2024-11-15T08:26:15,620 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-15T08:26:15,625 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-15T08:26:15,649 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 21ms. 
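Note: the cluster ID entries above show the file being written to a .tmp location and then moved to its final path. Below is a minimal sketch of that write-then-rename pattern using the Hadoop FileSystem API; the paths and the helper name are illustrative only, not taken from HBase's FSUtils.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  // Write content to a temporary file, then rename it into place so readers
  // never observe a half-written file. All paths here are placeholders.
  static void writeAtomically(FileSystem fs, Path target, Path tmp, String content)
      throws java.io.IOException {
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(content.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, target)) {
      throw new java.io.IOException("rename " + tmp + " -> " + target + " failed");
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path target = new Path("/tmp/hbase.id");      // placeholder target location
    Path tmp = new Path("/tmp/.tmp/hbase.id");    // placeholder temporary location
    fs.mkdirs(tmp.getParent());
    writeAtomically(fs, target, tmp, "ba5ab714-38eb-44a8-a856-45e1f3ffcc35");
  }
}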
2024-11-15T08:26:15,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:15,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:15,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:15,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:15,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741827_1003 (size=196) 2024-11-15T08:26:15,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741827_1003 (size=196) 2024-11-15T08:26:15,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741827_1003 (size=196) 2024-11-15T08:26:15,703 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T08:26:15,705 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-15T08:26:15,726 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
	at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at java.lang.Class.forName0(Native Method) ~[?:?]
	at java.lang.Class.forName(Class.java:375) ~[?:?]
	at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
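Note: the NoSuchMethodException above is an expected probe result rather than a failure: the code uses reflection to check whether the running Hadoop exposes a particular method and falls back if it does not. Below is a generic sketch of that probe-and-fallback pattern using only the JDK; the probed method in main is a hypothetical example, not the HDFS API from the log.

import java.lang.reflect.Method;

public class OptionalMethodProbe {
  // Probe a class for an optional method; return null if this runtime does not have it.
  static Method findOptionalMethod(Class<?> owner, String name, Class<?>... paramTypes) {
    try {
      return owner.getDeclaredMethod(name, paramTypes);
    } catch (NoSuchMethodException e) {
      // Expected on runtimes without the feature; callers take another code path.
      return null;
    }
  }

  public static void main(String[] args) {
    // Hypothetical probe: does String have an isBlank() method on this JDK?
    Method m = findOptionalMethod(String.class, "isBlank");
    System.out.println(m != null ? "feature available" : "falling back");
  }
}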
2024-11-15T08:26:15,732 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-15T08:26:15,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741828_1004 (size=1189) 2024-11-15T08:26:15,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741828_1004 (size=1189) 2024-11-15T08:26:15,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741828_1004 (size=1189) 2024-11-15T08:26:15,800 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData/data/master/store 2024-11-15T08:26:15,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741829_1005 (size=34) 2024-11-15T08:26:15,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741829_1005 (size=34) 2024-11-15T08:26:15,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741829_1005 (size=34) 2024-11-15T08:26:15,835 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-15T08:26:15,840 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:26:15,841 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T08:26:15,842 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T08:26:15,842 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T08:26:15,844 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T08:26:15,844 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T08:26:15,844 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T08:26:15,846 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731659175841Disabling compacts and flushes for region at 1731659175841Disabling writes for close at 1731659175844 (+3 ms)Writing region close event to WAL at 1731659175844Closed at 1731659175844 2024-11-15T08:26:15,848 WARN [master/48e8ad8dc936:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData/data/master/store/.initializing 2024-11-15T08:26:15,848 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData/WALs/48e8ad8dc936,35169,1731659174197 2024-11-15T08:26:15,857 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-15T08:26:15,872 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=48e8ad8dc936%2C35169%2C1731659174197, suffix=, logDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData/WALs/48e8ad8dc936,35169,1731659174197, archiveDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData/oldWALs, maxLogs=10 2024-11-15T08:26:15,894 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData/WALs/48e8ad8dc936,35169,1731659174197/48e8ad8dc936%2C35169%2C1731659174197.1731659175876, exclude list is [], retry=0 2024-11-15T08:26:15,914 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32865,DS-734a140c-0d60-4e41-a710-e466e0a866d2,DISK] 
2024-11-15T08:26:15,914 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37633,DS-a0f7502b-4b1a-4860-bf80-256464e6d99e,DISK] 2024-11-15T08:26:15,914 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38947,DS-fcf7e74c-e18f-4d0a-80c4-a96ba0767d05,DISK] 2024-11-15T08:26:15,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-15T08:26:15,962 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData/WALs/48e8ad8dc936,35169,1731659174197/48e8ad8dc936%2C35169%2C1731659174197.1731659175876 2024-11-15T08:26:15,963 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39297:39297),(127.0.0.1/127.0.0.1:38337:38337),(127.0.0.1/127.0.0.1:36349:36349)] 2024-11-15T08:26:15,963 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-15T08:26:15,964 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:26:15,967 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T08:26:15,968 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T08:26:16,012 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T08:26:16,043 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-15T08:26:16,047 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:16,050 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T08:26:16,051 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T08:26:16,054 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-15T08:26:16,054 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:16,055 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:26:16,056 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T08:26:16,058 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-15T08:26:16,058 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:16,059 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 
{}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:26:16,060 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-15T08:26:16,063 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-15T08:26:16,063 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:16,064 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:26:16,064 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T08:26:16,069 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T08:26:16,070 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-15T08:26:16,077 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T08:26:16,078 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T08:26:16,082 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
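Note: the CompactionConfiguration entries above print a selection ratio of 1.2 with minFilesToCompact=3 and maxFilesToCompact=10. The sketch below is a toy ratio-based selection rule meant only to illustrate what those three knobs constrain; it is not the ExploringCompactionPolicy named in the log.

import java.util.ArrayList;
import java.util.List;

public class RatioSelectionSketch {
  // Toy rule: keep a file when its size is at most ratio times the combined size of the
  // files after it, capped at maxFiles, and only compact if at least minFiles qualify.
  static List<Long> select(List<Long> sizes, double ratio, int minFiles, int maxFiles) {
    List<Long> picked = new ArrayList<>();
    for (int i = 0; i < sizes.size() && picked.size() < maxFiles; i++) {
      long rest = 0;
      for (int j = i + 1; j < sizes.size(); j++) rest += sizes.get(j);
      if (sizes.get(i) <= ratio * rest) picked.add(sizes.get(i));
    }
    return picked.size() >= minFiles ? picked : List.of();
  }

  public static void main(String[] args) {
    // Sizes in bytes; ratio/min/max mirror the values printed in the log (1.2, 3, 10).
    System.out.println(select(List.of(900L, 400L, 300L, 250L), 1.2, 3, 10));
  }
}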
2024-11-15T08:26:16,086 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-15T08:26:16,098 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:26:16,099 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61105825, jitterRate=-0.08945225179195404}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T08:26:16,107 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731659175982Initializing all the Stores at 1731659175984 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731659175985 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659175988 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659175989 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659175989Cleaning up temporary data from old regions at 1731659176078 (+89 ms)Region opened successfully at 1731659176107 (+29 ms) 2024-11-15T08:26:16,108 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-15T08:26:16,159 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45482d8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=48e8ad8dc936/172.17.0.2:0 2024-11-15T08:26:16,197 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
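Note: the region-open entry above prints a desiredMaxFileSize together with a negative jitterRate. The sketch below shows one simple way a jitter fraction can be applied to a configured split size so that many regions do not all reach the threshold at the same moment; it is a simplified model with illustrative names, not HBase's split-policy code.

import java.util.concurrent.ThreadLocalRandom;

public class SplitSizeJitterSketch {
  // Apply a one-off random jitter in [-maxJitterFraction, +maxJitterFraction]
  // to a configured maximum file size.
  static long jitteredMaxFileSize(long configuredMax, double maxJitterFraction) {
    double jitter = (ThreadLocalRandom.current().nextDouble() - 0.5) * 2 * maxJitterFraction;
    return (long) (configuredMax * (1.0 + jitter));
  }

  public static void main(String[] args) {
    long configured = 64L * 1024 * 1024; // example 64 MB base size
    System.out.println(jitteredMaxFileSize(configured, 0.25));
  }
}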
2024-11-15T08:26:16,211 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-15T08:26:16,212 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-15T08:26:16,215 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-15T08:26:16,217 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-15T08:26:16,224 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 6 msec 2024-11-15T08:26:16,224 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-15T08:26:16,261 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-15T08:26:16,282 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-15T08:26:16,286 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-15T08:26:16,291 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-15T08:26:16,295 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-15T08:26:16,297 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-15T08:26:16,301 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-15T08:26:16,308 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-15T08:26:16,311 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-15T08:26:16,316 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-15T08:26:16,318 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-15T08:26:16,346 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-15T08:26:16,349 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-15T08:26:16,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T08:26:16,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:16,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T08:26:16,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:16,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T08:26:16,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:16,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-15T08:26:16,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:16,365 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=48e8ad8dc936,35169,1731659174197, sessionid=0x1011abbf93d0000, setting cluster-up flag (Was=false) 2024-11-15T08:26:16,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:16,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:16,383 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:16,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
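Note: several entries above set a watcher on a znode "that does not yet exist" (for example /hbase/running) and later receive a NodeCreated event for it. Below is a minimal sketch of that exists-with-watch pattern using the plain ZooKeeper client API; the connect string is a placeholder for the test ensemble shown in the log.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch created = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { }); // placeholder ensemble
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated
          && "/hbase/running".equals(event.getPath())) {
        created.countDown();
      }
    };
    // exists() may legitimately return null; the watch still fires once the node appears.
    if (zk.exists("/hbase/running", watcher) != null) {
      created.countDown();
    }
    created.await();
    zk.close();
  }
}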
2024-11-15T08:26:16,395 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-15T08:26:16,399 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=48e8ad8dc936,35169,1731659174197 2024-11-15T08:26:16,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:16,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:16,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:16,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:16,419 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-15T08:26:16,422 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=48e8ad8dc936,35169,1731659174197 2024-11-15T08:26:16,437 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-15T08:26:16,491 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-11-15T08:26:16,496 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(746): ClusterId : ba5ab714-38eb-44a8-a856-45e1f3ffcc35 2024-11-15T08:26:16,502 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T08:26:16,503 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
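Note: the "Clearing all znodes" entries above remove everything under the flush-table-proc and online-snapshot coordination paths. The sketch below shows a generic way to delete the children of such paths with the ZooKeeper client API, tolerating nodes that are already gone; it is not HBase's ZKProcedureUtil.

import java.util.List;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

public class ClearProcedureZNodesSketch {
  // Delete every child under a coordination path; ignore nodes another process removed first.
  static void clearChildren(ZooKeeper zk, String parent)
      throws KeeperException, InterruptedException {
    List<String> children = zk.getChildren(parent, false);
    for (String child : children) {
      try {
        zk.delete(parent + "/" + child, -1); // -1 matches any version
      } catch (KeeperException.NoNodeException e) {
        // Already gone; nothing to do.
      }
    }
  }

  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { }); // placeholder ensemble
    for (String p : new String[] {"/hbase/flush-table-proc/acquired",
        "/hbase/flush-table-proc/reached", "/hbase/flush-table-proc/abort"}) {
      clearChildren(zk, p);
    }
    zk.close();
  }
}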
2024-11-15T08:26:16,506 DEBUG [RS:0;48e8ad8dc936:37511 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T08:26:16,508 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(746): ClusterId : ba5ab714-38eb-44a8-a856-45e1f3ffcc35 2024-11-15T08:26:16,509 DEBUG [RS:2;48e8ad8dc936:34555 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T08:26:16,517 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(746): ClusterId : ba5ab714-38eb-44a8-a856-45e1f3ffcc35 2024-11-15T08:26:16,519 DEBUG [RS:1;48e8ad8dc936:40637 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-15T08:26:16,522 DEBUG [RS:0;48e8ad8dc936:37511 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T08:26:16,522 DEBUG [RS:0;48e8ad8dc936:37511 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T08:26:16,528 DEBUG [RS:1;48e8ad8dc936:40637 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T08:26:16,529 DEBUG [RS:1;48e8ad8dc936:40637 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T08:26:16,532 DEBUG [RS:2;48e8ad8dc936:34555 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-15T08:26:16,532 DEBUG [RS:2;48e8ad8dc936:34555 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-15T08:26:16,533 DEBUG [RS:1;48e8ad8dc936:40637 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T08:26:16,534 DEBUG [RS:1;48e8ad8dc936:40637 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2889653b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=48e8ad8dc936/172.17.0.2:0 2024-11-15T08:26:16,535 DEBUG [RS:0;48e8ad8dc936:37511 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T08:26:16,535 DEBUG [RS:0;48e8ad8dc936:37511 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@354d7737, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=48e8ad8dc936/172.17.0.2:0 2024-11-15T08:26:16,536 DEBUG [RS:2;48e8ad8dc936:34555 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-15T08:26:16,537 DEBUG [RS:2;48e8ad8dc936:34555 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@92691a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=48e8ad8dc936/172.17.0.2:0 2024-11-15T08:26:16,564 DEBUG [RS:1;48e8ad8dc936:40637 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;48e8ad8dc936:40637 2024-11-15T08:26:16,570 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T08:26:16,570 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T08:26:16,570 
DEBUG [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-15T08:26:16,571 INFO [RS:1;48e8ad8dc936:40637 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T08:26:16,571 DEBUG [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-15T08:26:16,571 DEBUG [RS:2;48e8ad8dc936:34555 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;48e8ad8dc936:34555 2024-11-15T08:26:16,571 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T08:26:16,571 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T08:26:16,571 DEBUG [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-15T08:26:16,572 INFO [RS:2;48e8ad8dc936:34555 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T08:26:16,572 DEBUG [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-15T08:26:16,574 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(2659): reportForDuty to master=48e8ad8dc936,35169,1731659174197 with port=40637, startcode=1731659175269 2024-11-15T08:26:16,574 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(2659): reportForDuty to master=48e8ad8dc936,35169,1731659174197 with port=34555, startcode=1731659175344 2024-11-15T08:26:16,589 DEBUG [RS:2;48e8ad8dc936:34555 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T08:26:16,589 DEBUG [RS:1;48e8ad8dc936:40637 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T08:26:16,590 DEBUG [RS:0;48e8ad8dc936:37511 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;48e8ad8dc936:37511 2024-11-15T08:26:16,590 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-15T08:26:16,590 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-15T08:26:16,590 DEBUG [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-15T08:26:16,591 INFO [RS:0;48e8ad8dc936:37511 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T08:26:16,591 DEBUG [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-15T08:26:16,593 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(2659): reportForDuty to master=48e8ad8dc936,35169,1731659174197 with port=37511, startcode=1731659175104 2024-11-15T08:26:16,593 DEBUG [RS:0;48e8ad8dc936:37511 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-15T08:26:16,599 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-15T08:26:16,613 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-15T08:26:16,623 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-15T08:26:16,631 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 48e8ad8dc936,35169,1731659174197 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-15T08:26:16,641 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/48e8ad8dc936:0, corePoolSize=5, maxPoolSize=5 2024-11-15T08:26:16,641 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/48e8ad8dc936:0, corePoolSize=5, maxPoolSize=5 2024-11-15T08:26:16,641 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/48e8ad8dc936:0, corePoolSize=5, maxPoolSize=5 2024-11-15T08:26:16,641 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/48e8ad8dc936:0, corePoolSize=5, maxPoolSize=5 2024-11-15T08:26:16,641 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/48e8ad8dc936:0, corePoolSize=10, maxPoolSize=10 2024-11-15T08:26:16,642 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,642 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/48e8ad8dc936:0, corePoolSize=2, maxPoolSize=2 2024-11-15T08:26:16,642 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,656 INFO [HMaster-EventLoopGroup-1-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57863, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T08:26:16,656 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33079, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T08:26:16,656 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42053, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-15T08:26:16,660 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T08:26:16,660 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-15T08:26:16,664 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35169 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-15T08:26:16,667 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731659206667 2024-11-15T08:26:16,669 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:16,669 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-15T08:26:16,669 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T08:26:16,670 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-15T08:26:16,674 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35169 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-15T08:26:16,674 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35169 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-15T08:26:16,675 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-15T08:26:16,676 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-15T08:26:16,676 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-15T08:26:16,676 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-15T08:26:16,686 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,691 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-15T08:26:16,693 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-15T08:26:16,694 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-15T08:26:16,707 DEBUG [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-15T08:26:16,707 DEBUG [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-15T08:26:16,707 DEBUG [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-15T08:26:16,707 WARN [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-15T08:26:16,707 WARN [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-15T08:26:16,707 WARN [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-11-15T08:26:16,708 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-15T08:26:16,709 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-15T08:26:16,716 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/48e8ad8dc936:0:becomeActiveMaster-HFileCleaner.large.0-1731659176711,5,FailOnTimeoutGroup] 2024-11-15T08:26:16,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741831_1007 (size=1321) 2024-11-15T08:26:16,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741831_1007 (size=1321) 2024-11-15T08:26:16,720 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/48e8ad8dc936:0:becomeActiveMaster-HFileCleaner.small.0-1731659176716,5,FailOnTimeoutGroup] 2024-11-15T08:26:16,720 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,722 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-15T08:26:16,723 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,724 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-15T08:26:16,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741831_1007 (size=1321) 2024-11-15T08:26:16,726 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-15T08:26:16,726 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:26:16,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741832_1008 (size=32) 2024-11-15T08:26:16,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741832_1008 (size=32) 2024-11-15T08:26:16,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741832_1008 (size=32) 2024-11-15T08:26:16,763 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:26:16,773 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T08:26:16,781 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T08:26:16,781 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:16,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T08:26:16,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T08:26:16,786 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T08:26:16,787 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:16,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T08:26:16,789 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T08:26:16,796 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T08:26:16,798 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:16,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T08:26:16,800 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T08:26:16,803 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T08:26:16,804 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:16,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T08:26:16,805 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T08:26:16,806 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740 2024-11-15T08:26:16,808 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(2659): reportForDuty to master=48e8ad8dc936,35169,1731659174197 with port=37511, startcode=1731659175104 2024-11-15T08:26:16,808 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(2659): reportForDuty to master=48e8ad8dc936,35169,1731659174197 with port=34555, startcode=1731659175344 2024-11-15T08:26:16,810 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740 2024-11-15T08:26:16,810 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(2659): reportForDuty to master=48e8ad8dc936,35169,1731659174197 with port=40637, startcode=1731659175269 2024-11-15T08:26:16,811 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35169 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 48e8ad8dc936,34555,1731659175344 2024-11-15T08:26:16,815 INFO 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35169 {}] master.ServerManager(517): Registering regionserver=48e8ad8dc936,34555,1731659175344 2024-11-15T08:26:16,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T08:26:16,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T08:26:16,816 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T08:26:16,821 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T08:26:16,826 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:26:16,827 DEBUG [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:26:16,827 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35169 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 48e8ad8dc936,37511,1731659175104 2024-11-15T08:26:16,827 DEBUG [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44023 2024-11-15T08:26:16,827 DEBUG [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T08:26:16,827 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35169 {}] master.ServerManager(517): Registering regionserver=48e8ad8dc936,37511,1731659175104 2024-11-15T08:26:16,831 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64947979, jitterRate=-0.03219969570636749}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T08:26:16,833 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35169 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 48e8ad8dc936,40637,1731659175269 2024-11-15T08:26:16,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T08:26:16,833 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35169 {}] master.ServerManager(517): Registering regionserver=48e8ad8dc936,40637,1731659175269 2024-11-15T08:26:16,833 DEBUG [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:26:16,834 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731659176763Initializing all the Stores at 1731659176765 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731659176765Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731659176772 (+7 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659176772Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731659176772Cleaning up temporary data from old regions at 1731659176815 (+43 ms)Region opened successfully at 1731659176834 (+19 ms) 2024-11-15T08:26:16,834 DEBUG [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44023 2024-11-15T08:26:16,834 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T08:26:16,834 DEBUG [RS:2;48e8ad8dc936:34555 {}] zookeeper.ZKUtil(111): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/48e8ad8dc936,34555,1731659175344 2024-11-15T08:26:16,834 DEBUG [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T08:26:16,834 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T08:26:16,834 WARN [RS:2;48e8ad8dc936:34555 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-15T08:26:16,834 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T08:26:16,834 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T08:26:16,835 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T08:26:16,835 INFO [RS:2;48e8ad8dc936:34555 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-15T08:26:16,835 DEBUG [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/WALs/48e8ad8dc936,34555,1731659175344 2024-11-15T08:26:16,842 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T08:26:16,842 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731659176834Disabling compacts and flushes for region at 1731659176834Disabling writes for close at 1731659176835 (+1 ms)Writing region close event to WAL at 1731659176841 (+6 ms)Closed at 1731659176842 (+1 ms) 2024-11-15T08:26:16,844 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [48e8ad8dc936,37511,1731659175104] 2024-11-15T08:26:16,844 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [48e8ad8dc936,34555,1731659175344] 2024-11-15T08:26:16,849 DEBUG [RS:0;48e8ad8dc936:37511 {}] zookeeper.ZKUtil(111): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/48e8ad8dc936,37511,1731659175104 2024-11-15T08:26:16,849 WARN [RS:0;48e8ad8dc936:37511 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-15T08:26:16,849 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T08:26:16,849 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-15T08:26:16,849 INFO [RS:0;48e8ad8dc936:37511 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-15T08:26:16,849 DEBUG [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/WALs/48e8ad8dc936,37511,1731659175104 2024-11-15T08:26:16,850 DEBUG [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:26:16,850 DEBUG [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44023 2024-11-15T08:26:16,850 DEBUG [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-15T08:26:16,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T08:26:16,855 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [48e8ad8dc936,40637,1731659175269] 2024-11-15T08:26:16,858 DEBUG [RS:1;48e8ad8dc936:40637 {}] zookeeper.ZKUtil(111): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/48e8ad8dc936,40637,1731659175269 2024-11-15T08:26:16,858 WARN [RS:1;48e8ad8dc936:40637 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-15T08:26:16,858 INFO [RS:1;48e8ad8dc936:40637 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-15T08:26:16,858 DEBUG [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/WALs/48e8ad8dc936,40637,1731659175269 2024-11-15T08:26:16,860 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-15T08:26:16,871 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T08:26:16,875 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-15T08:26:16,882 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T08:26:16,882 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T08:26:16,882 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-15T08:26:16,902 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T08:26:16,902 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T08:26:16,903 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-15T08:26:16,924 INFO [RS:2;48e8ad8dc936:34555 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T08:26:16,924 INFO [RS:1;48e8ad8dc936:40637 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T08:26:16,924 INFO [RS:1;48e8ad8dc936:40637 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,924 INFO [RS:2;48e8ad8dc936:34555 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-15T08:26:16,925 INFO [RS:0;48e8ad8dc936:37511 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-15T08:26:16,925 INFO [RS:0;48e8ad8dc936:37511 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,934 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T08:26:16,935 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T08:26:16,936 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-15T08:26:16,942 INFO [RS:0;48e8ad8dc936:37511 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T08:26:16,943 INFO [RS:2;48e8ad8dc936:34555 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T08:26:16,943 INFO [RS:1;48e8ad8dc936:40637 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-15T08:26:16,944 INFO [RS:0;48e8ad8dc936:37511 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,944 INFO [RS:2;48e8ad8dc936:34555 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,945 DEBUG [RS:0;48e8ad8dc936:37511 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,945 DEBUG [RS:2;48e8ad8dc936:34555 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,945 DEBUG [RS:0;48e8ad8dc936:37511 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,945 DEBUG [RS:2;48e8ad8dc936:34555 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,945 DEBUG [RS:0;48e8ad8dc936:37511 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,945 DEBUG [RS:2;48e8ad8dc936:34555 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,945 DEBUG [RS:0;48e8ad8dc936:37511 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,945 DEBUG [RS:2;48e8ad8dc936:34555 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,945 DEBUG [RS:0;48e8ad8dc936:37511 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,945 DEBUG [RS:2;48e8ad8dc936:34555 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/48e8ad8dc936:0, 
corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,946 DEBUG [RS:0;48e8ad8dc936:37511 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/48e8ad8dc936:0, corePoolSize=2, maxPoolSize=2 2024-11-15T08:26:16,946 DEBUG [RS:2;48e8ad8dc936:34555 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/48e8ad8dc936:0, corePoolSize=2, maxPoolSize=2 2024-11-15T08:26:16,946 DEBUG [RS:0;48e8ad8dc936:37511 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,946 DEBUG [RS:2;48e8ad8dc936:34555 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,946 DEBUG [RS:0;48e8ad8dc936:37511 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,946 DEBUG [RS:2;48e8ad8dc936:34555 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,946 DEBUG [RS:0;48e8ad8dc936:37511 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,946 DEBUG [RS:2;48e8ad8dc936:34555 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,946 DEBUG [RS:0;48e8ad8dc936:37511 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,946 DEBUG [RS:2;48e8ad8dc936:34555 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,947 DEBUG [RS:0;48e8ad8dc936:37511 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,947 DEBUG [RS:2;48e8ad8dc936:34555 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,947 DEBUG [RS:0;48e8ad8dc936:37511 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,947 DEBUG [RS:2;48e8ad8dc936:34555 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,947 DEBUG [RS:0;48e8ad8dc936:37511 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0, corePoolSize=3, maxPoolSize=3 2024-11-15T08:26:16,947 DEBUG [RS:2;48e8ad8dc936:34555 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0, corePoolSize=3, maxPoolSize=3 2024-11-15T08:26:16,947 DEBUG [RS:2;48e8ad8dc936:34555 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/48e8ad8dc936:0, corePoolSize=3, maxPoolSize=3 2024-11-15T08:26:16,947 DEBUG [RS:0;48e8ad8dc936:37511 {}] 
executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/48e8ad8dc936:0, corePoolSize=3, maxPoolSize=3 2024-11-15T08:26:16,948 INFO [RS:1;48e8ad8dc936:40637 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,949 DEBUG [RS:1;48e8ad8dc936:40637 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,949 DEBUG [RS:1;48e8ad8dc936:40637 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,949 DEBUG [RS:1;48e8ad8dc936:40637 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,949 DEBUG [RS:1;48e8ad8dc936:40637 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,949 DEBUG [RS:1;48e8ad8dc936:40637 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,949 DEBUG [RS:1;48e8ad8dc936:40637 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/48e8ad8dc936:0, corePoolSize=2, maxPoolSize=2 2024-11-15T08:26:16,949 DEBUG [RS:1;48e8ad8dc936:40637 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,950 DEBUG [RS:1;48e8ad8dc936:40637 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,950 DEBUG [RS:1;48e8ad8dc936:40637 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,950 DEBUG [RS:1;48e8ad8dc936:40637 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,950 DEBUG [RS:1;48e8ad8dc936:40637 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,950 DEBUG [RS:1;48e8ad8dc936:40637 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/48e8ad8dc936:0, corePoolSize=1, maxPoolSize=1 2024-11-15T08:26:16,950 DEBUG [RS:1;48e8ad8dc936:40637 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0, corePoolSize=3, maxPoolSize=3 2024-11-15T08:26:16,951 DEBUG [RS:1;48e8ad8dc936:40637 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/48e8ad8dc936:0, corePoolSize=3, maxPoolSize=3 2024-11-15T08:26:16,956 INFO [RS:0;48e8ad8dc936:37511 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,957 INFO [RS:0;48e8ad8dc936:37511 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-11-15T08:26:16,957 INFO [RS:0;48e8ad8dc936:37511 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,957 INFO [RS:0;48e8ad8dc936:37511 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,957 INFO [RS:0;48e8ad8dc936:37511 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,957 INFO [RS:0;48e8ad8dc936:37511 {}] hbase.ChoreService(168): Chore ScheduledChore name=48e8ad8dc936,37511,1731659175104-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T08:26:16,960 INFO [RS:1;48e8ad8dc936:40637 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,960 INFO [RS:1;48e8ad8dc936:40637 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,960 INFO [RS:1;48e8ad8dc936:40637 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,961 INFO [RS:1;48e8ad8dc936:40637 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,961 INFO [RS:1;48e8ad8dc936:40637 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,961 INFO [RS:1;48e8ad8dc936:40637 {}] hbase.ChoreService(168): Chore ScheduledChore name=48e8ad8dc936,40637,1731659175269-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T08:26:16,966 INFO [RS:2;48e8ad8dc936:34555 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,966 INFO [RS:2;48e8ad8dc936:34555 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,966 INFO [RS:2;48e8ad8dc936:34555 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,966 INFO [RS:2;48e8ad8dc936:34555 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,966 INFO [RS:2;48e8ad8dc936:34555 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,966 INFO [RS:2;48e8ad8dc936:34555 {}] hbase.ChoreService(168): Chore ScheduledChore name=48e8ad8dc936,34555,1731659175344-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T08:26:16,986 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T08:26:16,987 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T08:26:16,987 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-15T08:26:16,989 INFO [RS:2;48e8ad8dc936:34555 {}] hbase.ChoreService(168): Chore ScheduledChore name=48e8ad8dc936,34555,1731659175344-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-15T08:26:16,989 INFO [RS:1;48e8ad8dc936:40637 {}] hbase.ChoreService(168): Chore ScheduledChore name=48e8ad8dc936,40637,1731659175269-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,989 INFO [RS:0;48e8ad8dc936:37511 {}] hbase.ChoreService(168): Chore ScheduledChore name=48e8ad8dc936,37511,1731659175104-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,989 INFO [RS:0;48e8ad8dc936:37511 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,989 INFO [RS:1;48e8ad8dc936:40637 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,990 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.Replication(171): 48e8ad8dc936,37511,1731659175104 started 2024-11-15T08:26:16,990 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.Replication(171): 48e8ad8dc936,40637,1731659175269 started 2024-11-15T08:26:16,990 INFO [RS:2;48e8ad8dc936:34555 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:16,990 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.Replication(171): 48e8ad8dc936,34555,1731659175344 started 2024-11-15T08:26:17,017 INFO [RS:2;48e8ad8dc936:34555 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:17,017 INFO [RS:1;48e8ad8dc936:40637 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:17,018 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(1482): Serving as 48e8ad8dc936,34555,1731659175344, RpcServer on 48e8ad8dc936/172.17.0.2:34555, sessionid=0x1011abbf93d0003 2024-11-15T08:26:17,019 DEBUG [RS:2;48e8ad8dc936:34555 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T08:26:17,019 DEBUG [RS:2;48e8ad8dc936:34555 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 48e8ad8dc936,34555,1731659175344 2024-11-15T08:26:17,019 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(1482): Serving as 48e8ad8dc936,40637,1731659175269, RpcServer on 48e8ad8dc936/172.17.0.2:40637, sessionid=0x1011abbf93d0002 2024-11-15T08:26:17,020 DEBUG [RS:2;48e8ad8dc936:34555 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '48e8ad8dc936,34555,1731659175344' 2024-11-15T08:26:17,020 INFO [RS:0;48e8ad8dc936:37511 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-15T08:26:17,020 DEBUG [RS:1;48e8ad8dc936:40637 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T08:26:17,020 DEBUG [RS:2;48e8ad8dc936:34555 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T08:26:17,020 DEBUG [RS:1;48e8ad8dc936:40637 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 48e8ad8dc936,40637,1731659175269 2024-11-15T08:26:17,020 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(1482): Serving as 48e8ad8dc936,37511,1731659175104, RpcServer on 48e8ad8dc936/172.17.0.2:37511, sessionid=0x1011abbf93d0001 2024-11-15T08:26:17,020 DEBUG [RS:0;48e8ad8dc936:37511 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-15T08:26:17,020 DEBUG [RS:0;48e8ad8dc936:37511 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 48e8ad8dc936,37511,1731659175104 2024-11-15T08:26:17,020 DEBUG [RS:0;48e8ad8dc936:37511 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '48e8ad8dc936,37511,1731659175104' 2024-11-15T08:26:17,020 DEBUG [RS:0;48e8ad8dc936:37511 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T08:26:17,021 DEBUG [RS:1;48e8ad8dc936:40637 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '48e8ad8dc936,40637,1731659175269' 2024-11-15T08:26:17,021 DEBUG [RS:1;48e8ad8dc936:40637 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-15T08:26:17,022 DEBUG [RS:0;48e8ad8dc936:37511 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T08:26:17,022 DEBUG [RS:2;48e8ad8dc936:34555 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T08:26:17,022 DEBUG [RS:1;48e8ad8dc936:40637 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-15T08:26:17,023 DEBUG [RS:2;48e8ad8dc936:34555 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T08:26:17,023 DEBUG [RS:0;48e8ad8dc936:37511 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T08:26:17,023 DEBUG [RS:1;48e8ad8dc936:40637 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-15T08:26:17,023 DEBUG [RS:2;48e8ad8dc936:34555 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T08:26:17,023 DEBUG [RS:0;48e8ad8dc936:37511 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T08:26:17,023 DEBUG [RS:1;48e8ad8dc936:40637 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-15T08:26:17,023 DEBUG [RS:0;48e8ad8dc936:37511 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 48e8ad8dc936,37511,1731659175104 2024-11-15T08:26:17,023 DEBUG [RS:2;48e8ad8dc936:34555 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 48e8ad8dc936,34555,1731659175344 2024-11-15T08:26:17,023 DEBUG [RS:1;48e8ad8dc936:40637 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 48e8ad8dc936,40637,1731659175269 
2024-11-15T08:26:17,023 DEBUG [RS:2;48e8ad8dc936:34555 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '48e8ad8dc936,34555,1731659175344' 2024-11-15T08:26:17,023 DEBUG [RS:0;48e8ad8dc936:37511 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '48e8ad8dc936,37511,1731659175104' 2024-11-15T08:26:17,023 DEBUG [RS:1;48e8ad8dc936:40637 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '48e8ad8dc936,40637,1731659175269' 2024-11-15T08:26:17,023 DEBUG [RS:0;48e8ad8dc936:37511 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T08:26:17,023 DEBUG [RS:2;48e8ad8dc936:34555 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T08:26:17,023 DEBUG [RS:1;48e8ad8dc936:40637 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-15T08:26:17,024 DEBUG [RS:2;48e8ad8dc936:34555 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T08:26:17,024 DEBUG [RS:1;48e8ad8dc936:40637 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T08:26:17,025 DEBUG [RS:1;48e8ad8dc936:40637 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T08:26:17,025 INFO [RS:1;48e8ad8dc936:40637 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T08:26:17,026 INFO [RS:1;48e8ad8dc936:40637 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T08:26:17,026 WARN [48e8ad8dc936:35169 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-15T08:26:17,027 DEBUG [RS:2;48e8ad8dc936:34555 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T08:26:17,027 DEBUG [RS:0;48e8ad8dc936:37511 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-15T08:26:17,027 INFO [RS:2;48e8ad8dc936:34555 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T08:26:17,027 INFO [RS:2;48e8ad8dc936:34555 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-15T08:26:17,028 DEBUG [RS:0;48e8ad8dc936:37511 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-15T08:26:17,028 INFO [RS:0;48e8ad8dc936:37511 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-15T08:26:17,028 INFO [RS:0;48e8ad8dc936:37511 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-15T08:26:17,131 INFO [RS:2;48e8ad8dc936:34555 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-15T08:26:17,131 INFO [RS:0;48e8ad8dc936:37511 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-15T08:26:17,131 INFO [RS:1;48e8ad8dc936:40637 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-15T08:26:17,135 INFO [RS:0;48e8ad8dc936:37511 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=48e8ad8dc936%2C37511%2C1731659175104, suffix=, logDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/WALs/48e8ad8dc936,37511,1731659175104, archiveDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/oldWALs, maxLogs=32 2024-11-15T08:26:17,135 INFO [RS:2;48e8ad8dc936:34555 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=48e8ad8dc936%2C34555%2C1731659175344, suffix=, logDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/WALs/48e8ad8dc936,34555,1731659175344, archiveDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/oldWALs, maxLogs=32 2024-11-15T08:26:17,138 INFO [RS:1;48e8ad8dc936:40637 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=48e8ad8dc936%2C40637%2C1731659175269, suffix=, logDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/WALs/48e8ad8dc936,40637,1731659175269, archiveDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/oldWALs, maxLogs=32 2024-11-15T08:26:17,156 DEBUG [RS:0;48e8ad8dc936:37511 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/WALs/48e8ad8dc936,37511,1731659175104/48e8ad8dc936%2C37511%2C1731659175104.1731659177139, exclude list is [], retry=0 2024-11-15T08:26:17,160 DEBUG [RS:2;48e8ad8dc936:34555 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/WALs/48e8ad8dc936,34555,1731659175344/48e8ad8dc936%2C34555%2C1731659175344.1731659177140, exclude list is [], retry=0 2024-11-15T08:26:17,161 DEBUG [RS:1;48e8ad8dc936:40637 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/WALs/48e8ad8dc936,40637,1731659175269/48e8ad8dc936%2C40637%2C1731659175269.1731659177140, exclude list is [], retry=0 2024-11-15T08:26:17,162 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38947,DS-fcf7e74c-e18f-4d0a-80c4-a96ba0767d05,DISK] 2024-11-15T08:26:17,162 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37633,DS-a0f7502b-4b1a-4860-bf80-256464e6d99e,DISK] 2024-11-15T08:26:17,162 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:32865,DS-734a140c-0d60-4e41-a710-e466e0a866d2,DISK] 2024-11-15T08:26:17,198 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37633,DS-a0f7502b-4b1a-4860-bf80-256464e6d99e,DISK] 2024-11-15T08:26:17,199 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32865,DS-734a140c-0d60-4e41-a710-e466e0a866d2,DISK] 2024-11-15T08:26:17,199 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38947,DS-fcf7e74c-e18f-4d0a-80c4-a96ba0767d05,DISK] 2024-11-15T08:26:17,200 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38947,DS-fcf7e74c-e18f-4d0a-80c4-a96ba0767d05,DISK] 2024-11-15T08:26:17,200 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32865,DS-734a140c-0d60-4e41-a710-e466e0a866d2,DISK] 2024-11-15T08:26:17,201 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37633,DS-a0f7502b-4b1a-4860-bf80-256464e6d99e,DISK] 2024-11-15T08:26:17,203 INFO [RS:0;48e8ad8dc936:37511 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/WALs/48e8ad8dc936,37511,1731659175104/48e8ad8dc936%2C37511%2C1731659175104.1731659177139 2024-11-15T08:26:17,210 INFO [RS:2;48e8ad8dc936:34555 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/WALs/48e8ad8dc936,34555,1731659175344/48e8ad8dc936%2C34555%2C1731659175344.1731659177140 2024-11-15T08:26:17,210 DEBUG [RS:0;48e8ad8dc936:37511 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38337:38337),(127.0.0.1/127.0.0.1:39297:39297),(127.0.0.1/127.0.0.1:36349:36349)] 2024-11-15T08:26:17,211 DEBUG [RS:2;48e8ad8dc936:34555 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39297:39297),(127.0.0.1/127.0.0.1:36349:36349),(127.0.0.1/127.0.0.1:38337:38337)] 2024-11-15T08:26:17,217 INFO [RS:1;48e8ad8dc936:40637 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/WALs/48e8ad8dc936,40637,1731659175269/48e8ad8dc936%2C40637%2C1731659175269.1731659177140 2024-11-15T08:26:17,220 DEBUG [RS:1;48e8ad8dc936:40637 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:39297:39297),(127.0.0.1/127.0.0.1:36349:36349),(127.0.0.1/127.0.0.1:38337:38337)] 2024-11-15T08:26:17,280 DEBUG [48e8ad8dc936:35169 {}] assignment.AssignmentManager(2472): Processing 
assignQueue; systemServersCount=3, allServersCount=3 2024-11-15T08:26:17,291 DEBUG [48e8ad8dc936:35169 {}] balancer.BalancerClusterState(204): Hosts are {48e8ad8dc936=0} racks are {/default-rack=0} 2024-11-15T08:26:17,299 DEBUG [48e8ad8dc936:35169 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T08:26:17,299 DEBUG [48e8ad8dc936:35169 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T08:26:17,299 DEBUG [48e8ad8dc936:35169 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T08:26:17,299 DEBUG [48e8ad8dc936:35169 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T08:26:17,299 DEBUG [48e8ad8dc936:35169 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T08:26:17,299 DEBUG [48e8ad8dc936:35169 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T08:26:17,299 INFO [48e8ad8dc936:35169 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T08:26:17,299 INFO [48e8ad8dc936:35169 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T08:26:17,299 INFO [48e8ad8dc936:35169 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T08:26:17,299 DEBUG [48e8ad8dc936:35169 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T08:26:17,309 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:26:17,317 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 48e8ad8dc936,40637,1731659175269, state=OPENING 2024-11-15T08:26:17,324 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-15T08:26:17,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:17,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:17,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:17,327 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:17,327 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T08:26:17,328 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T08:26:17,328 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T08:26:17,329 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T08:26:17,330 DEBUG 
[PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-15T08:26:17,333 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:26:17,516 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T08:26:17,519 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51901, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T08:26:17,533 INFO [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-15T08:26:17,533 INFO [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-15T08:26:17,534 INFO [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-15T08:26:17,539 INFO [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=48e8ad8dc936%2C40637%2C1731659175269.meta, suffix=.meta, logDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/WALs/48e8ad8dc936,40637,1731659175269, archiveDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/oldWALs, maxLogs=32 2024-11-15T08:26:17,560 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/WALs/48e8ad8dc936,40637,1731659175269/48e8ad8dc936%2C40637%2C1731659175269.meta.1731659177542.meta, exclude list is [], retry=0 2024-11-15T08:26:17,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38947,DS-fcf7e74c-e18f-4d0a-80c4-a96ba0767d05,DISK] 2024-11-15T08:26:17,565 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32865,DS-734a140c-0d60-4e41-a710-e466e0a866d2,DISK] 2024-11-15T08:26:17,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37633,DS-a0f7502b-4b1a-4860-bf80-256464e6d99e,DISK] 2024-11-15T08:26:17,581 INFO [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/WALs/48e8ad8dc936,40637,1731659175269/48e8ad8dc936%2C40637%2C1731659175269.meta.1731659177542.meta 2024-11-15T08:26:17,582 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38337:38337),(127.0.0.1/127.0.0.1:39297:39297),(127.0.0.1/127.0.0.1:36349:36349)] 2024-11-15T08:26:17,583 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-15T08:26:17,584 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-11-15T08:26:17,585 INFO [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T08:26:17,586 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-15T08:26:17,588 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-15T08:26:17,590 INFO [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
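The WAL setup above (blocksize=256 MB, rollsize=128 MB, maxLogs=32, AsyncFSWALProvider) is driven by a handful of standard properties. A hedged sketch naming them with the values the log reports; the wrapper class is illustrative only, and defaults in this branch may already match:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static Configuration walTuning() {
    Configuration conf = HBaseConfiguration.create();
    // Provider behind "Instantiating WALProvider ... AsyncFSWALProvider".
    conf.set("hbase.wal.provider", "asyncfs");
    // rollsize in the log (128 MB) is blocksize (256 MB) times the roll multiplier (0.5).
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // maxLogs=32 caps how many WAL files may pile up before flushes are forced.
    conf.setInt("hbase.regionserver.maxlogs", 32);
    return conf;
  }
}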
2024-11-15T08:26:17,601 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-15T08:26:17,601 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:26:17,602 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-15T08:26:17,602 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-15T08:26:17,610 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-15T08:26:17,613 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-15T08:26:17,613 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:17,614 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T08:26:17,614 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-15T08:26:17,616 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-15T08:26:17,616 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:17,617 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T08:26:17,618 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-15T08:26:17,619 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-15T08:26:17,620 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:17,621 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-15T08:26:17,621 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-15T08:26:17,623 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-15T08:26:17,623 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:17,625 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-15T08:26:17,625 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-15T08:26:17,627 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740 2024-11-15T08:26:17,632 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740 2024-11-15T08:26:17,636 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-15T08:26:17,636 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-15T08:26:17,637 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-15T08:26:17,644 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-15T08:26:17,649 INFO [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59647270, jitterRate=-0.11118641495704651}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-15T08:26:17,649 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-15T08:26:17,653 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731659177602Writing region info on filesystem at 1731659177603 (+1 ms)Initializing all the Stores at 1731659177609 (+6 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731659177609Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731659177609Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659177609Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731659177610 (+1 ms)Cleaning up temporary data from old regions at 1731659177636 (+26 ms)Running coprocessor post-open hooks at 1731659177649 (+13 ms)Region opened successfully at 1731659177653 (+4 ms) 2024-11-15T08:26:17,664 INFO [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731659177506 2024-11-15T08:26:17,677 DEBUG [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-15T08:26:17,678 INFO [RS_OPEN_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-15T08:26:17,680 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:26:17,682 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 48e8ad8dc936,40637,1731659175269, state=OPEN 2024-11-15T08:26:17,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T08:26:17,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T08:26:17,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T08:26:17,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-15T08:26:17,685 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T08:26:17,686 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T08:26:17,686 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T08:26:17,686 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-15T08:26:17,686 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, 
state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=48e8ad8dc936,40637,1731659175269 2024-11-15T08:26:17,695 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-15T08:26:17,695 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=48e8ad8dc936,40637,1731659175269 in 353 msec 2024-11-15T08:26:17,702 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-15T08:26:17,702 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 835 msec 2024-11-15T08:26:17,704 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-15T08:26:17,704 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-15T08:26:17,727 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:26:17,729 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:26:17,760 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:26:17,763 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38769, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:26:17,792 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.2800 sec 2024-11-15T08:26:17,793 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731659177793, completionTime=-1 2024-11-15T08:26:17,829 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-15T08:26:17,829 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
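"Start fetching meta region location from registry" and "The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=..., seqNum=-1]" are the client-side lookup of which region server hosts meta. A minimal sketch of the same lookup through the public client API; the connection settings and the wrapper class are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // Client-side equivalent of the "fetched meta region location" lines:
      // resolve which region server currently hosts hbase:meta,,1.
      HRegionLocation meta = locator.getRegionLocation(new byte[0]);
      System.out.println(meta);
    }
  }
}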
2024-11-15T08:26:17,876 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-15T08:26:17,876 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731659237876 2024-11-15T08:26:17,876 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731659297876 2024-11-15T08:26:17,876 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 46 msec 2024-11-15T08:26:17,889 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-15T08:26:17,898 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=48e8ad8dc936,35169,1731659174197-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:17,899 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=48e8ad8dc936,35169,1731659174197-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:17,899 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=48e8ad8dc936,35169,1731659174197-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:17,901 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-48e8ad8dc936:35169, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:17,902 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:17,902 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:17,909 DEBUG [master/48e8ad8dc936:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-15T08:26:17,943 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.474sec 2024-11-15T08:26:17,946 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-15T08:26:17,948 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-15T08:26:17,949 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-15T08:26:17,950 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-15T08:26:17,950 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-15T08:26:17,951 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=48e8ad8dc936,35169,1731659174197-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-15T08:26:17,952 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=48e8ad8dc936,35169,1731659174197-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-15T08:26:17,990 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T08:26:17,991 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is 48e8ad8dc936,35169,1731659174197 2024-11-15T08:26:17,994 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@64f96e0 2024-11-15T08:26:17,995 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T08:26:17,998 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36019, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T08:26:18,005 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35169 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-15T08:26:18,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-11-15T08:26:18,023 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T08:26:18,024 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:18,025 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35169 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-11-15T08:26:18,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T08:26:18,033 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T08:26:18,043 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6337d1c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:18,077 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-15T08:26:18,077 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-15T08:26:18,098 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:26:18,102 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:26:18,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741837_1013 (size=349) 2024-11-15T08:26:18,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741837_1013 (size=349) 2024-11-15T08:26:18,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741837_1013 (size=349) 2024-11-15T08:26:18,120 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 626e7c98f2efa065104d27dc4f61e6c7, NAME => 'hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:26:18,142 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:26:18,146 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:26:18,146 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:26:18,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T08:26:18,147 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dbc0c48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:18,147 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:26:18,157 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:26:18,161 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:18,165 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58476, version=4.0.0-alpha-1-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:26:18,169 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@507f57f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:18,169 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:26:18,179 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:26:18,180 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:26:18,196 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33152, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:26:18,200 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=48e8ad8dc936,35169,1731659174197 2024-11-15T08:26:18,200 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 2024-11-15T08:26:18,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/test.cache.data in system properties and HBase conf 2024-11-15T08:26:18,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.tmp.dir in system properties and HBase conf 2024-11-15T08:26:18,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.log.dir in system properties and HBase conf 2024-11-15T08:26:18,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-15T08:26:18,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-15T08:26:18,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-15T08:26:18,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-15T08:26:18,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-15T08:26:18,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-15T08:26:18,202 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T08:26:18,202 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-15T08:26:18,202 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-15T08:26:18,202 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-15T08:26:18,202 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T08:26:18,202 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-15T08:26:18,202 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/nfs.dump.dir in system properties and HBase conf 2024-11-15T08:26:18,202 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/java.io.tmpdir in system properties and HBase conf 2024-11-15T08:26:18,202 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-15T08:26:18,202 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-15T08:26:18,202 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-15T08:26:18,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741838_1014 (size=36) 2024-11-15T08:26:18,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741838_1014 (size=36) 2024-11-15T08:26:18,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741838_1014 (size=36) 2024-11-15T08:26:18,246 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:26:18,247 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 626e7c98f2efa065104d27dc4f61e6c7, disabling compactions & flushes 2024-11-15T08:26:18,247 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7. 2024-11-15T08:26:18,247 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7. 2024-11-15T08:26:18,247 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7. after waiting 0 ms 2024-11-15T08:26:18,247 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7. 2024-11-15T08:26:18,247 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7. 2024-11-15T08:26:18,247 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 626e7c98f2efa065104d27dc4f61e6c7: Waiting for close lock at 1731659178247Disabling compacts and flushes for region at 1731659178247Disabling writes for close at 1731659178247Writing region close event to WAL at 1731659178247Closed at 1731659178247 2024-11-15T08:26:18,252 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T08:26:18,260 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1731659178253"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659178253"}]},"ts":"1731659178253"} 2024-11-15T08:26:18,268 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
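The create request logged earlier ({NAME => 'l', VERSIONS => '1', IN_MEMORY => 'true', BLOCKSIZE => '8192 B (8KB)', ...}) is what the Admin client submitted for hbase:acl. A hedged sketch of building the same descriptor through the public client API; class and method names are the standard TableDescriptorBuilder/ColumnFamilyDescriptorBuilder ones, and this is not the code the test itself ran:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class AclTableSketch {
  // Builds a descriptor matching the one in the logged create request:
  // {NAME => 'l', VERSIONS => '1', IN_MEMORY => 'true', BLOCKSIZE => 8192, ...}
  static void createAclLikeTable(Admin admin) throws IOException {
    ColumnFamilyDescriptor l = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("l"))
        .setMaxVersions(1)
        .setInMemory(true)
        .setBlocksize(8192)
        .build();
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("hbase", "acl"))
        .setColumnFamily(l)
        .build();
    admin.createTable(desc); // submits the CreateTableProcedure seen above as pid=4
  }
}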
2024-11-15T08:26:18,271 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T08:26:18,274 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659178271"}]},"ts":"1731659178271"} 2024-11-15T08:26:18,280 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-11-15T08:26:18,280 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {48e8ad8dc936=0} racks are {/default-rack=0} 2024-11-15T08:26:18,282 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T08:26:18,282 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T08:26:18,282 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T08:26:18,282 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T08:26:18,282 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T08:26:18,282 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T08:26:18,282 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T08:26:18,282 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T08:26:18,282 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T08:26:18,283 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T08:26:18,287 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=626e7c98f2efa065104d27dc4f61e6c7, ASSIGN}] 2024-11-15T08:26:18,290 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=626e7c98f2efa065104d27dc4f61e6c7, ASSIGN 2024-11-15T08:26:18,292 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=626e7c98f2efa065104d27dc4f61e6c7, ASSIGN; state=OFFLINE, location=48e8ad8dc936,40637,1731659175269; forceNewPlan=false, retain=false 2024-11-15T08:26:18,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T08:26:18,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741839_1015 (size=592039) 2024-11-15T08:26:18,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741839_1015 (size=592039) 2024-11-15T08:26:18,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741839_1015 (size=592039) 2024-11-15T08:26:18,446 INFO [48e8ad8dc936:35169 {}] balancer.BaseLoadBalancer(388): Reassigned 1 
regions. 1 retained the pre-restart assignment. 2024-11-15T08:26:18,447 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=626e7c98f2efa065104d27dc4f61e6c7, regionState=OPENING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:26:18,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=626e7c98f2efa065104d27dc4f61e6c7, ASSIGN because future has completed 2024-11-15T08:26:18,456 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 626e7c98f2efa065104d27dc4f61e6c7, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:26:18,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741840_1016 (size=1663647) 2024-11-15T08:26:18,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741840_1016 (size=1663647) 2024-11-15T08:26:18,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741840_1016 (size=1663647) 2024-11-15T08:26:18,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T08:26:18,668 INFO [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7. 2024-11-15T08:26:18,669 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 626e7c98f2efa065104d27dc4f61e6c7, NAME => 'hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7.', STARTKEY => '', ENDKEY => ''} 2024-11-15T08:26:18,669 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7. service=AccessControlService 2024-11-15T08:26:18,669 INFO [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
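The repeated "Checking to see if procedure is done pid=4" entries are the client polling the master until the create-table procedure finishes. A sketch of the same wait expressed on the client side, assuming Admin.createTableAsync is available in this branch; the blocking createTable call performs the equivalent polling internally:

import java.util.concurrent.Future;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class CreateTablePollSketch {
  // The blocking Admin.createTable() polls the master ("Checking to see if
  // procedure is done pid=4") until the procedure completes; the async variant
  // hands back a Future for the same wait.
  static void createAndWait(Admin admin, TableDescriptor desc) throws Exception {
    Future<Void> pending = admin.createTableAsync(desc);
    pending.get(); // returns once the master reports the create procedure as done
  }
}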
2024-11-15T08:26:18,670 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 626e7c98f2efa065104d27dc4f61e6c7 2024-11-15T08:26:18,670 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:26:18,670 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 626e7c98f2efa065104d27dc4f61e6c7 2024-11-15T08:26:18,670 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 626e7c98f2efa065104d27dc4f61e6c7 2024-11-15T08:26:18,676 INFO [StoreOpener-626e7c98f2efa065104d27dc4f61e6c7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 626e7c98f2efa065104d27dc4f61e6c7 2024-11-15T08:26:18,679 INFO [StoreOpener-626e7c98f2efa065104d27dc4f61e6c7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 626e7c98f2efa065104d27dc4f61e6c7 columnFamilyName l 2024-11-15T08:26:18,680 DEBUG [StoreOpener-626e7c98f2efa065104d27dc4f61e6c7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:18,681 INFO [StoreOpener-626e7c98f2efa065104d27dc4f61e6c7-1 {}] regionserver.HStore(327): Store=626e7c98f2efa065104d27dc4f61e6c7/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:26:18,681 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 626e7c98f2efa065104d27dc4f61e6c7 2024-11-15T08:26:18,683 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/acl/626e7c98f2efa065104d27dc4f61e6c7 2024-11-15T08:26:18,683 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/acl/626e7c98f2efa065104d27dc4f61e6c7 2024-11-15T08:26:18,684 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 626e7c98f2efa065104d27dc4f61e6c7 2024-11-15T08:26:18,684 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 626e7c98f2efa065104d27dc4f61e6c7 2024-11-15T08:26:18,688 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 626e7c98f2efa065104d27dc4f61e6c7 2024-11-15T08:26:18,692 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/acl/626e7c98f2efa065104d27dc4f61e6c7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:26:18,693 INFO [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 626e7c98f2efa065104d27dc4f61e6c7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64354817, jitterRate=-0.041038498282432556}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:26:18,693 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 626e7c98f2efa065104d27dc4f61e6c7 2024-11-15T08:26:18,694 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 626e7c98f2efa065104d27dc4f61e6c7: Running coprocessor pre-open hook at 1731659178670Writing region info on filesystem at 1731659178670Initializing all the Stores at 1731659178673 (+3 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731659178673Cleaning up temporary data from old regions at 1731659178684 (+11 ms)Running coprocessor post-open hooks at 1731659178693 (+9 ms)Region opened successfully at 1731659178694 (+1 ms) 2024-11-15T08:26:18,696 INFO [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., pid=6, masterSystemTime=1731659178638 2024-11-15T08:26:18,702 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=626e7c98f2efa065104d27dc4f61e6c7, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:26:18,702 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7. 
2024-11-15T08:26:18,703 INFO [RS_OPEN_PRIORITY_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7. 2024-11-15T08:26:18,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 626e7c98f2efa065104d27dc4f61e6c7, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:26:18,723 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-15T08:26:18,723 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 626e7c98f2efa065104d27dc4f61e6c7, server=48e8ad8dc936,40637,1731659175269 in 253 msec 2024-11-15T08:26:18,729 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-15T08:26:18,729 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=626e7c98f2efa065104d27dc4f61e6c7, ASSIGN in 437 msec 2024-11-15T08:26:18,731 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T08:26:18,731 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659178731"}]},"ts":"1731659178731"} 2024-11-15T08:26:18,736 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-11-15T08:26:18,738 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T08:26:18,742 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 729 msec 2024-11-15T08:26:19,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-15T08:26:19,177 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-11-15T08:26:19,193 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-15T08:26:19,194 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-15T08:26:19,195 INFO [master/48e8ad8dc936:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=48e8ad8dc936,35169,1731659174197-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-15T08:26:20,010 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T08:26:20,159 WARN [Thread-384 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T08:26:20,421 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T08:26:20,422 WARN [Thread-384 {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-15T08:26:20,424 INFO [Thread-384 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T08:26:20,446 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T08:26:20,446 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T08:26:20,447 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T08:26:20,447 INFO [Thread-384 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T08:26:20,447 INFO [Thread-384 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T08:26:20,447 INFO [Thread-384 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T08:26:20,449 INFO [Thread-384 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a6506b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.log.dir/,AVAILABLE} 2024-11-15T08:26:20,449 INFO [Thread-384 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74822f8a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-15T08:26:20,463 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T08:26:20,492 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2526877c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.log.dir/,AVAILABLE} 2024-11-15T08:26:20,493 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7709f05b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-15T08:26:20,668 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices as a root resource class 2024-11-15T08:26:20,669 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver as a provider class 2024-11-15T08:26:20,669 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-15T08:26:20,671 INFO [Thread-384 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-15T08:26:20,722 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-15T08:26:20,947 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-15T08:26:21,292 INFO [Thread-384 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.mapreduce.v2.hs.webapp.HsWebServices to GuiceManagedComponentProvider with the scope "PerRequest" 2024-11-15T08:26:21,323 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@f629ad8{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/java.io.tmpdir/jetty-localhost-45927-hadoop-yarn-common-3_4_1_jar-_-any-8147625001255045836/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-15T08:26:21,323 INFO [Thread-384 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@86a27da{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/java.io.tmpdir/jetty-localhost-36783-hadoop-yarn-common-3_4_1_jar-_-any-12596543014902874932/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-15T08:26:21,325 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@67837fd5{HTTP/1.1, (http/1.1)}{localhost:45927} 2024-11-15T08:26:21,325 INFO [Time-limited test {}] server.Server(415): Started @14312ms 2024-11-15T08:26:21,345 INFO [Thread-384 {}] server.AbstractConnector(333): Started 
ServerConnector@3d3325dd{HTTP/1.1, (http/1.1)}{localhost:36783} 2024-11-15T08:26:21,346 INFO [Thread-384 {}] server.Server(415): Started @14333ms 2024-11-15T08:26:21,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741841_1017 (size=5) 2024-11-15T08:26:21,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741841_1017 (size=5) 2024-11-15T08:26:21,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741841_1017 (size=5) 2024-11-15T08:26:22,766 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-15T08:26:22,773 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T08:26:22,805 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-15T08:26:22,806 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T08:26:22,841 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T08:26:22,841 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T08:26:22,841 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-15T08:26:22,850 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T08:26:22,851 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@fefcaab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.log.dir/,AVAILABLE} 2024-11-15T08:26:22,852 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57c6a7f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-15T08:26:22,918 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-15T08:26:22,918 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-15T08:26:22,918 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-15T08:26:22,918 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-15T08:26:22,933 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-15T08:26:22,960 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-15T08:26:23,119 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T08:26:23,129 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-15T08:26:23,152 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4979e977{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/java.io.tmpdir/jetty-localhost-40293-hadoop-yarn-common-3_4_1_jar-_-any-13157250059409458261/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-15T08:26:23,162 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a1a80b{HTTP/1.1, (http/1.1)}{localhost:40293} 2024-11-15T08:26:23,162 INFO [Time-limited test {}] server.Server(415): Started @16149ms 2024-11-15T08:26:23,254 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-15T08:26:23,257 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-11-15T08:26:23,445 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is 
disabled.So is the LogAggregationStatusTracker. 2024-11-15T08:26:23,449 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T08:26:23,474 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-15T08:26:23,475 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-15T08:26:23,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-15T08:26:23,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-15T08:26:23,508 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-15T08:26:23,510 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-15T08:26:23,514 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c70a759{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.log.dir/,AVAILABLE} 2024-11-15T08:26:23,515 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@607efe43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-15T08:26:23,587 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-15T08:26:23,587 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-15T08:26:23,587 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-15T08:26:23,588 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-15T08:26:23,597 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-15T08:26:23,606 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-15T08:26:23,742 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 
2024-11-15T08:26:23,747 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@714e385f{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/java.io.tmpdir/jetty-localhost-45295-hadoop-yarn-common-3_4_1_jar-_-any-11300786457574768914/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-15T08:26:23,748 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2696985a{HTTP/1.1, (http/1.1)}{localhost:45295} 2024-11-15T08:26:23,748 INFO [Time-limited test {}] server.Server(415): Started @16735ms 2024-11-15T08:26:23,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-11-15T08:26:23,784 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:26:23,824 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=720, OpenFileDescriptor=780, MaxFileDescriptor=1048576, SystemLoadAverage=213, ProcessCount=11, AvailableMemoryMB=4367 2024-11-15T08:26:23,826 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=720 is superior to 500 2024-11-15T08:26:23,830 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-15T08:26:23,836 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 48e8ad8dc936,35169,1731659174197 2024-11-15T08:26:23,836 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@527d4829 2024-11-15T08:26:23,836 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-15T08:26:23,839 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46050, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-15T08:26:23,841 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T08:26:23,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-11-15T08:26:23,847 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T08:26:23,848 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" 
qualifier: "testtb-testExportWithTargetName" procId is: 7 2024-11-15T08:26:23,848 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:23,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-15T08:26:23,852 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T08:26:23,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741842_1018 (size=406) 2024-11-15T08:26:23,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741842_1018 (size=406) 2024-11-15T08:26:23,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741842_1018 (size=406) 2024-11-15T08:26:23,876 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0bc57f31f7ca6dcaef0825646e63127d, NAME => 'testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:26:23,876 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => e79cbe27a4edc1b677a5fd9e92c80f0b, NAME => 'testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:26:23,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741843_1019 (size=67) 2024-11-15T08:26:23,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741843_1019 (size=67) 2024-11-15T08:26:23,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741843_1019 (size=67) 2024-11-15T08:26:23,953 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] 
regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:26:23,953 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing e79cbe27a4edc1b677a5fd9e92c80f0b, disabling compactions & flushes 2024-11-15T08:26:23,953 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. 2024-11-15T08:26:23,953 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. 2024-11-15T08:26:23,953 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. after waiting 0 ms 2024-11-15T08:26:23,954 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. 2024-11-15T08:26:23,954 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. 2024-11-15T08:26:23,954 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for e79cbe27a4edc1b677a5fd9e92c80f0b: Waiting for close lock at 1731659183953Disabling compacts and flushes for region at 1731659183953Disabling writes for close at 1731659183953Writing region close event to WAL at 1731659183954 (+1 ms)Closed at 1731659183954 2024-11-15T08:26:23,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741844_1020 (size=67) 2024-11-15T08:26:23,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-15T08:26:23,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741844_1020 (size=67) 2024-11-15T08:26:23,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741844_1020 (size=67) 2024-11-15T08:26:23,960 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:26:23,960 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing 0bc57f31f7ca6dcaef0825646e63127d, disabling compactions & flushes 2024-11-15T08:26:23,960 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. 
2024-11-15T08:26:23,961 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. 2024-11-15T08:26:23,961 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. after waiting 0 ms 2024-11-15T08:26:23,961 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. 2024-11-15T08:26:23,961 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. 2024-11-15T08:26:23,961 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0bc57f31f7ca6dcaef0825646e63127d: Waiting for close lock at 1731659183960Disabling compacts and flushes for region at 1731659183960Disabling writes for close at 1731659183961 (+1 ms)Writing region close event to WAL at 1731659183961Closed at 1731659183961 2024-11-15T08:26:23,963 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T08:26:23,964 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1731659183963"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659183963"}]},"ts":"1731659183963"} 2024-11-15T08:26:23,964 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1731659183963"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659183963"}]},"ts":"1731659183963"} 2024-11-15T08:26:24,008 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-15T08:26:24,010 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T08:26:24,011 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659184011"}]},"ts":"1731659184011"} 2024-11-15T08:26:24,015 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-11-15T08:26:24,016 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {48e8ad8dc936=0} racks are {/default-rack=0} 2024-11-15T08:26:24,020 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T08:26:24,020 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T08:26:24,020 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T08:26:24,020 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T08:26:24,020 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T08:26:24,020 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T08:26:24,020 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T08:26:24,020 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T08:26:24,020 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T08:26:24,021 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T08:26:24,021 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=0bc57f31f7ca6dcaef0825646e63127d, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e79cbe27a4edc1b677a5fd9e92c80f0b, ASSIGN}] 2024-11-15T08:26:24,025 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e79cbe27a4edc1b677a5fd9e92c80f0b, ASSIGN 2024-11-15T08:26:24,025 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=0bc57f31f7ca6dcaef0825646e63127d, ASSIGN 2024-11-15T08:26:24,028 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e79cbe27a4edc1b677a5fd9e92c80f0b, ASSIGN; state=OFFLINE, location=48e8ad8dc936,37511,1731659175104; forceNewPlan=false, retain=false 2024-11-15T08:26:24,028 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=0bc57f31f7ca6dcaef0825646e63127d, ASSIGN; state=OFFLINE, location=48e8ad8dc936,34555,1731659175344; forceNewPlan=false, retain=false 2024-11-15T08:26:24,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-15T08:26:24,178 INFO [48e8ad8dc936:35169 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-15T08:26:24,179 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=0bc57f31f7ca6dcaef0825646e63127d, regionState=OPENING, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:26:24,179 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=e79cbe27a4edc1b677a5fd9e92c80f0b, regionState=OPENING, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:26:24,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=0bc57f31f7ca6dcaef0825646e63127d, ASSIGN because future has completed 2024-11-15T08:26:24,183 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0bc57f31f7ca6dcaef0825646e63127d, server=48e8ad8dc936,34555,1731659175344}] 2024-11-15T08:26:24,184 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e79cbe27a4edc1b677a5fd9e92c80f0b, ASSIGN because future has completed 2024-11-15T08:26:24,185 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure e79cbe27a4edc1b677a5fd9e92c80f0b, server=48e8ad8dc936,37511,1731659175104}] 2024-11-15T08:26:24,338 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T08:26:24,341 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T08:26:24,357 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43191, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T08:26:24,357 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60445, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T08:26:24,363 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. 2024-11-15T08:26:24,363 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. 
2024-11-15T08:26:24,364 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 0bc57f31f7ca6dcaef0825646e63127d, NAME => 'testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T08:26:24,364 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => e79cbe27a4edc1b677a5fd9e92c80f0b, NAME => 'testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T08:26:24,364 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. service=AccessControlService 2024-11-15T08:26:24,364 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. service=AccessControlService 2024-11-15T08:26:24,364 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T08:26:24,364 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T08:26:24,365 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:24,365 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:24,365 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:26:24,365 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:26:24,365 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:24,365 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:24,365 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): 
checking classloading for e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:24,365 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:24,367 INFO [StoreOpener-e79cbe27a4edc1b677a5fd9e92c80f0b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:24,370 INFO [StoreOpener-e79cbe27a4edc1b677a5fd9e92c80f0b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e79cbe27a4edc1b677a5fd9e92c80f0b columnFamilyName cf 2024-11-15T08:26:24,370 DEBUG [StoreOpener-e79cbe27a4edc1b677a5fd9e92c80f0b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:24,370 INFO [StoreOpener-0bc57f31f7ca6dcaef0825646e63127d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:24,371 INFO [StoreOpener-e79cbe27a4edc1b677a5fd9e92c80f0b-1 {}] regionserver.HStore(327): Store=e79cbe27a4edc1b677a5fd9e92c80f0b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:26:24,371 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:24,372 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:24,372 INFO [StoreOpener-0bc57f31f7ca6dcaef0825646e63127d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0bc57f31f7ca6dcaef0825646e63127d columnFamilyName cf 2024-11-15T08:26:24,372 DEBUG [StoreOpener-0bc57f31f7ca6dcaef0825646e63127d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:24,373 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:24,373 INFO [StoreOpener-0bc57f31f7ca6dcaef0825646e63127d-1 {}] regionserver.HStore(327): Store=0bc57f31f7ca6dcaef0825646e63127d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:26:24,373 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:24,374 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:24,374 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:24,374 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:24,375 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:24,375 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:24,375 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:24,377 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:24,378 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:24,380 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:26:24,381 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 
{event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened e79cbe27a4edc1b677a5fd9e92c80f0b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60184607, jitterRate=-0.10317946970462799}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:26:24,381 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:24,382 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:26:24,383 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for e79cbe27a4edc1b677a5fd9e92c80f0b: Running coprocessor pre-open hook at 1731659184365Writing region info on filesystem at 1731659184365Initializing all the Stores at 1731659184367 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659184367Cleaning up temporary data from old regions at 1731659184374 (+7 ms)Running coprocessor post-open hooks at 1731659184381 (+7 ms)Region opened successfully at 1731659184382 (+1 ms) 2024-11-15T08:26:24,383 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened 0bc57f31f7ca6dcaef0825646e63127d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67108624, jitterRate=-3.5762786865234375E-6}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:26:24,383 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:24,383 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 0bc57f31f7ca6dcaef0825646e63127d: Running coprocessor pre-open hook at 1731659184365Writing region info on filesystem at 1731659184365Initializing all the Stores at 1731659184367 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659184367Cleaning up temporary data from old regions at 1731659184375 (+8 ms)Running coprocessor post-open hooks at 1731659184383 (+8 ms)Region opened successfully at 1731659184383 2024-11-15T08:26:24,384 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for 
testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b., pid=11, masterSystemTime=1731659184340 2024-11-15T08:26:24,385 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d., pid=10, masterSystemTime=1731659184337 2024-11-15T08:26:24,388 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. 2024-11-15T08:26:24,388 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. 2024-11-15T08:26:24,389 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=e79cbe27a4edc1b677a5fd9e92c80f0b, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:26:24,389 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. 2024-11-15T08:26:24,390 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. 2024-11-15T08:26:24,391 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=0bc57f31f7ca6dcaef0825646e63127d, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:26:24,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure e79cbe27a4edc1b677a5fd9e92c80f0b, server=48e8ad8dc936,37511,1731659175104 because future has completed 2024-11-15T08:26:24,396 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0bc57f31f7ca6dcaef0825646e63127d, server=48e8ad8dc936,34555,1731659175344 because future has completed 2024-11-15T08:26:24,406 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=9 2024-11-15T08:26:24,406 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure e79cbe27a4edc1b677a5fd9e92c80f0b, server=48e8ad8dc936,37511,1731659175104 in 216 msec 2024-11-15T08:26:24,407 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=8 2024-11-15T08:26:24,408 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 0bc57f31f7ca6dcaef0825646e63127d, server=48e8ad8dc936,34555,1731659175344 in 218 msec 2024-11-15T08:26:24,410 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e79cbe27a4edc1b677a5fd9e92c80f0b, ASSIGN in 385 msec 2024-11-15T08:26:24,413 INFO 
[PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-15T08:26:24,413 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=0bc57f31f7ca6dcaef0825646e63127d, ASSIGN in 387 msec 2024-11-15T08:26:24,415 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T08:26:24,415 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659184415"}]},"ts":"1731659184415"} 2024-11-15T08:26:24,419 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-11-15T08:26:24,422 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T08:26:24,426 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-11-15T08:26:24,438 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:26:24,444 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:26:24,445 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:26:24,445 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:26:24,447 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34933, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-11-15T08:26:24,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:26:24,455 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-15T08:26:24,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-15T08:26:24,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, 
state=SyncConnected, path=/hbase/acl 2024-11-15T08:26:24,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-15T08:26:24,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-15T08:26:24,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-15T08:26:24,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:24,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:24,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:24,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:26:24,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-15T08:26:24,492 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-15T08:26:24,498 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-15T08:26:24,498 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 652 msec 2024-11-15T08:26:24,499 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-15T08:26:24,502 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-15T08:26:24,711 DEBUG [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:26:24,711 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-11-15T08:26:24,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-15T08:26:24,712 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-15T08:26:24,714 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:26:24,714 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-11-15T08:26:24,714 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T08:26:24,714 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-15T08:26:24,715 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-15T08:26:24,715 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-11-15T08:26:24,717 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:26:24,717 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-11-15T08:26:24,717 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-15T08:26:24,717 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-11-15T08:26:24,717 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T08:26:24,717 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase 
RegionObservers 2024-11-15T08:26:24,718 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-15T08:26:24,718 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-11-15T08:26:24,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-15T08:26:24,986 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-15T08:26:24,986 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-11-15T08:26:24,987 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:26:24,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-11-15T08:26:24,993 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:26:24,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithTargetName assigned. 2024-11-15T08:26:24,997 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-15T08:26:25,007 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-15T08:26:25,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659185007 (current time:1731659185007). 
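
The CREATE operation logged above leaves testtb-testExportWithTargetName with two regions (split at row '1') and a single column family 'cf' whose attributes appear in the region-open journal earlier in this section. For reference, a minimal client-side sketch that would produce an equivalent table is shown below; the connection setup, class name, and split key are illustrative assumptions, not code taken from the test.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();           // picks up hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName name = TableName.valueOf("testtb-testExportWithTargetName");
          TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)                                 // VERSIONS => '1' as in the journal
                  .setBloomFilterType(BloomType.ROW)                 // BLOOMFILTER => 'ROW'
                  .build())
              .build();
          byte[][] splitKeys = { Bytes.toBytes("1") };               // yields the two regions seen above
          admin.createTable(desc, splitKeys);                        // blocks until the table is online
        }
      }
    }
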
2024-11-15T08:26:25,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:26:25,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-15T08:26:25,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:26:25,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c028b3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:25,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:26:25,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:26:25,011 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:26:25,011 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:26:25,012 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:26:25,012 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bad5cdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:25,012 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:26:25,012 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:26:25,012 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:25,013 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46062, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:26:25,015 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d3d883a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:25,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:26:25,017 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:26:25,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:26:25,019 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55044, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:26:25,021 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 2024-11-15T08:26:25,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:26:25,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:25,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:25,028 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
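
The NodeCreated/NodeDataChanged notifications on /hbase/acl a few entries back are plain ZooKeeper watches, which ZKPermissionWatcher uses to keep every server's permission cache in sync. A minimal sketch of that watch pattern with the stock ZooKeeper client follows; the quorum string and znode path are taken from the log, while the class name, timeouts, and sleep are illustrative.

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class AclWatchExample {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Quorum string as reported by ZKWatcher in the log above.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:59481", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
          if (event.getPath() != null) {
            // NodeCreated / NodeDataChanged / NodeChildrenChanged, as seen in the log
            System.out.println(event.getType() + " on " + event.getPath());
          }
        });
        connected.await();               // wait for the session to reach SyncConnected
        zk.exists("/hbase/acl", true);   // registers a one-shot watch on the acl znode
        Thread.sleep(60_000);            // keep the session open long enough to observe events
        zk.close();
      }
    }
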
2024-11-15T08:26:25,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4744e54a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:25,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:26:25,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:26:25,030 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:26:25,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:26:25,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:26:25,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b40c746, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:25,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:26:25,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:26:25,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:25,032 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46080, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:26:25,033 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@481a17a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:25,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:26:25,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:26:25,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:26:25,036 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55050, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
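
Each of these short-lived internal connections starts by asking the master's connection registry for the cluster id ('ba5ab714-…') and the hbase:meta location. Client applications would not normally do this by hand; roughly the same information is reachable through the Admin API, as in the hedged sketch below (class name and connection setup are assumptions).

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterInfoExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("cluster id: " + metrics.getClusterId());
          System.out.println("active master: " + metrics.getMasterName());
          for (ServerName rs : metrics.getLiveServerMetrics().keySet()) {
            System.out.println("region server: " + rs);   // e.g. 48e8ad8dc936,37511,1731659175104
          }
        }
      }
    }
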
2024-11-15T08:26:25,039 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:26:25,041 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 2024-11-15T08:26:25,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:26:25,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:25,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:25,042 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:26:25,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-15T08:26:25,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T08:26:25,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-15T08:26:25,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-15T08:26:25,055 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:26:25,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-15T08:26:25,062 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:26:25,077 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:26:25,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741845_1021 (size=167) 2024-11-15T08:26:25,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741845_1021 (size=167) 2024-11-15T08:26:25,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741845_1021 (size=167) 2024-11-15T08:26:25,097 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:26:25,100 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0bc57f31f7ca6dcaef0825646e63127d}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e79cbe27a4edc1b677a5fd9e92c80f0b}] 2024-11-15T08:26:25,106 
INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:25,106 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:25,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-15T08:26:25,267 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-11-15T08:26:25,267 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34555 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-11-15T08:26:25,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. 2024-11-15T08:26:25,268 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. 2024-11-15T08:26:25,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for 0bc57f31f7ca6dcaef0825646e63127d: 2024-11-15T08:26:25,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for e79cbe27a4edc1b677a5fd9e92c80f0b: 2024-11-15T08:26:25,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. for emptySnaptb0-testExportWithTargetName completed. 2024-11-15T08:26:25,273 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. for emptySnaptb0-testExportWithTargetName completed. 2024-11-15T08:26:25,274 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-15T08:26:25,274 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-15T08:26:25,278 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:26:25,278 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:26:25,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T08:26:25,280 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T08:26:25,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741846_1022 (size=70) 2024-11-15T08:26:25,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741846_1022 (size=70) 2024-11-15T08:26:25,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741847_1023 (size=70) 2024-11-15T08:26:25,297 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. 2024-11-15T08:26:25,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741847_1023 (size=70) 2024-11-15T08:26:25,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741846_1022 (size=70) 2024-11-15T08:26:25,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741847_1023 (size=70) 2024-11-15T08:26:25,298 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. 
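
The per-region snapshot work above (region-info stored, references created for an as-yet empty set of hfiles) is the server side of a client call such as Admin.snapshot on an online table. A hedged client-side sketch is below; it mirrors the snapshot name and table from the log but is not the test's actual code.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshotExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithTargetName");
          // A FLUSH-type snapshot of an online table; blocks until the master-side
          // SnapshotProcedure (SNAPSHOT_PREPARE ... SNAPSHOT_COMPLETE_SNAPSHOT) has finished.
          admin.snapshot("emptySnaptb0-testExportWithTargetName", table);
          admin.listSnapshots().forEach(s -> System.out.println(s.getName()));
        }
      }
    }
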
2024-11-15T08:26:25,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-15T08:26:25,301 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-15T08:26:25,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-15T08:26:25,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-11-15T08:26:25,303 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:25,303 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:25,304 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:25,304 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:25,308 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e79cbe27a4edc1b677a5fd9e92c80f0b in 205 msec 2024-11-15T08:26:25,310 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-11-15T08:26:25,310 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0bc57f31f7ca6dcaef0825646e63127d in 205 msec 2024-11-15T08:26:25,310 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:26:25,312 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:26:25,316 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:26:25,316 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-11-15T08:26:25,319 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-11-15T08:26:25,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741848_1024 (size=549) 2024-11-15T08:26:25,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741848_1024 (size=549) 2024-11-15T08:26:25,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741848_1024 (size=549) 2024-11-15T08:26:25,344 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:26:25,356 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:26:25,357 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-11-15T08:26:25,360 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:26:25,361 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-15T08:26:25,363 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 312 msec 2024-11-15T08:26:25,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-15T08:26:25,377 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-15T08:26:25,396 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='0ecf637c822ff44b62aa628ce4a318749', locateType=CURRENT is [region=testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d., hostname=48e8ad8dc936,34555,1731659175344, seqNum=2] 2024-11-15T08:26:25,400 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] 
client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='27223ce97b33fb33216dee25c948b170d', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:26:25,403 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='1885b90194e42a6c008549a555fc5135e', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:26:25,405 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='31cd2c36ca1b7cde62c519cf1d665380d', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:26:25,406 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='54ce76d3382e88f4f5e6cbc4b4d427787', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:26:25,407 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='48780999878d5900dd291ef26253d87cd', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:26:25,408 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='654e7ab97df049f7633c0702df7b7c397', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:26:25,409 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:26:25,411 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:26:25,419 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48146, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:26:25,419 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43356, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:26:25,421 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34555 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:26:25,428 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37511 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. with WAL disabled. Data may be lost in the event of a crash. 
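
The two "writing data to region ... with WAL disabled" warnings above are what HRegion emits when a client writes with durability SKIP_WAL, trading crash safety for speed while the test loads its sample rows. A minimal sketch of such a write follows; the row key, qualifier, and value are placeholders.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
          Put put = new Put(Bytes.toBytes("row-0001"));   // illustrative row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);  // triggers the "WAL disabled" warning above;
                                                   // unflushed edits are lost if the server crashes
          table.put(put);
        }
      }
    }
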
2024-11-15T08:26:25,434 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-15T08:26:25,439 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-15T08:26:25,440 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. 2024-11-15T08:26:25,441 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:26:25,445 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-15T08:26:25,463 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-15T08:26:25,473 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-15T08:26:25,486 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-15T08:26:25,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659185486 (current time:1731659185486). 
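
HBaseTestingUtil reports "Found 2 regions" and the first region name by scanning hbase:meta; ordinary client code can inspect the same layout through a RegionLocator, roughly as sketched below (class name and connection setup are assumptions).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ListRegionsExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(
                 TableName.valueOf("testtb-testExportWithTargetName"))) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // Prints region name and hosting server, mirroring the
            // "firstRegionName=..." and regionLocation entries in the log.
            System.out.println(loc.getRegion().getRegionNameAsString()
                + " -> " + loc.getServerName());
          }
        }
      }
    }
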
2024-11-15T08:26:25,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:26:25,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-15T08:26:25,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:26:25,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@457feaaa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:25,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:26:25,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:26:25,489 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:26:25,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:26:25,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:26:25,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7982a7f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:25,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:26:25,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:26:25,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:25,492 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46100, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:26:25,493 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32aaa776, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:25,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:26:25,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:26:25,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:26:25,497 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55056, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:26:25,498 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169. 2024-11-15T08:26:25,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:26:25,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:25,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:25,499 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T08:26:25,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@614959f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:25,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:26:25,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:26:25,501 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:26:25,502 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:26:25,502 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:26:25,502 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40315fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:25,502 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:26:25,502 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:26:25,503 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:25,503 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46112, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:26:25,504 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4959e5c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:25,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:26:25,507 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:26:25,507 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:26:25,509 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55062, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
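
The "Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA]" entries surrounding these snapshot requests come from the permissions written to hbase:acl when the table was created. With the AccessController coprocessor active (as the metrics registration earlier shows), a client can read the same grants via AccessControlClient, roughly as below; the class name is an illustrative assumption.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    public class ReadAclExample {
      public static void main(String[] args) throws Throwable {   // AccessControlClient methods declare Throwable
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          for (UserPermission p :
               AccessControlClient.getUserPermissions(conn, "testtb-testExportWithTargetName")) {
            System.out.println(p);   // expect something like "jenkins: RWXCA", as in the log
          }
        }
      }
    }
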
2024-11-15T08:26:25,511 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2]
2024-11-15T08:26:25,513 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169.
2024-11-15T08:26:25,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-15T08:26:25,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-15T08:26:25,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-15T08:26:25,514 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-15T08:26:25,515 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:26:25,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T08:26:25,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-15T08:26:25,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-15T08:26:25,520 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:26:25,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-15T08:26:25,522 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:26:25,528 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:26:25,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741849_1025 (size=162) 2024-11-15T08:26:25,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741849_1025 (size=162) 2024-11-15T08:26:25,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741849_1025 (size=162) 2024-11-15T08:26:25,559 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:26:25,560 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0bc57f31f7ca6dcaef0825646e63127d}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e79cbe27a4edc1b677a5fd9e92c80f0b}] 
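[Editor's note] The entries above record the master accepting a snapshot request for table testtb-testExportWithTargetName and driving it through SnapshotProcedure pid=15 (type=FLUSH), including the two SnapshotRegionProcedure subprocedures. For orientation only, the following is a minimal client-side sketch of how such a flush snapshot is requested through the public Admin API; the table and snapshot names mirror the log, but the code is an illustration, not the test's own source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master for a FLUSH-type snapshot; server-side this is what the log
      // shows as MasterRpcServices.snapshot(...) storing and executing a SnapshotProcedure.
      admin.snapshot("snaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"),
          SnapshotType.FLUSH);
    }
  }
}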
2024-11-15T08:26:25,562 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:25,562 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:25,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-15T08:26:25,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34555 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-11-15T08:26:25,716 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-11-15T08:26:25,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. 2024-11-15T08:26:25,717 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. 2024-11-15T08:26:25,721 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing e79cbe27a4edc1b677a5fd9e92c80f0b 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-15T08:26:25,721 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing 0bc57f31f7ca6dcaef0825646e63127d 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-15T08:26:25,802 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d/.tmp/cf/3b4ecc3b44004ac88d2edb5d9e60720a is 71, key is 0b2154c5b3e5e04ec725f675b292db6b/cf:q/1731659185420/Put/seqid=0 2024-11-15T08:26:25,802 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b/.tmp/cf/f8d552947916490ea11862c106174d87 is 71, key is 113561502f2d728f9db66627617c5f21/cf:q/1731659185427/Put/seqid=0 2024-11-15T08:26:25,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-15T08:26:25,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741850_1026 (size=5286) 2024-11-15T08:26:25,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741850_1026 (size=5286) 
2024-11-15T08:26:25,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741850_1026 (size=5286) 2024-11-15T08:26:25,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741851_1027 (size=8324) 2024-11-15T08:26:25,858 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d/.tmp/cf/3b4ecc3b44004ac88d2edb5d9e60720a 2024-11-15T08:26:25,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741851_1027 (size=8324) 2024-11-15T08:26:25,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741851_1027 (size=8324) 2024-11-15T08:26:25,865 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b/.tmp/cf/f8d552947916490ea11862c106174d87 2024-11-15T08:26:25,951 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d/.tmp/cf/3b4ecc3b44004ac88d2edb5d9e60720a as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d/cf/3b4ecc3b44004ac88d2edb5d9e60720a 2024-11-15T08:26:25,954 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b/.tmp/cf/f8d552947916490ea11862c106174d87 as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b/cf/f8d552947916490ea11862c106174d87 2024-11-15T08:26:25,971 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d/cf/3b4ecc3b44004ac88d2edb5d9e60720a, entries=3, sequenceid=6, filesize=5.2 K 2024-11-15T08:26:25,980 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b/cf/f8d552947916490ea11862c106174d87, entries=47, sequenceid=6, 
filesize=8.1 K 2024-11-15T08:26:25,987 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for e79cbe27a4edc1b677a5fd9e92c80f0b in 265ms, sequenceid=6, compaction requested=false 2024-11-15T08:26:25,988 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-11-15T08:26:25,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for e79cbe27a4edc1b677a5fd9e92c80f0b: 2024-11-15T08:26:25,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. for snaptb0-testExportWithTargetName completed. 2024-11-15T08:26:25,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-15T08:26:25,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:26:25,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b/cf/f8d552947916490ea11862c106174d87] hfiles 2024-11-15T08:26:25,992 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 0bc57f31f7ca6dcaef0825646e63127d in 259ms, sequenceid=6, compaction requested=false 2024-11-15T08:26:25,992 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for 0bc57f31f7ca6dcaef0825646e63127d: 2024-11-15T08:26:25,992 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. for snaptb0-testExportWithTargetName completed. 2024-11-15T08:26:25,992 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-15T08:26:25,992 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:26:25,992 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d/cf/3b4ecc3b44004ac88d2edb5d9e60720a] hfiles 2024-11-15T08:26:25,994 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d/cf/3b4ecc3b44004ac88d2edb5d9e60720a for snapshot=snaptb0-testExportWithTargetName 2024-11-15T08:26:25,994 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b/cf/f8d552947916490ea11862c106174d87 for snapshot=snaptb0-testExportWithTargetName 2024-11-15T08:26:26,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741852_1028 (size=109) 2024-11-15T08:26:26,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741852_1028 (size=109) 2024-11-15T08:26:26,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741852_1028 (size=109) 2024-11-15T08:26:26,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. 
2024-11-15T08:26:26,021 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-15T08:26:26,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-11-15T08:26:26,022 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:26,022 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:26,027 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e79cbe27a4edc1b677a5fd9e92c80f0b in 464 msec 2024-11-15T08:26:26,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741853_1029 (size=109) 2024-11-15T08:26:26,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741853_1029 (size=109) 2024-11-15T08:26:26,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741853_1029 (size=109) 2024-11-15T08:26:26,041 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. 
2024-11-15T08:26:26,042 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-11-15T08:26:26,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-11-15T08:26:26,043 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:26,043 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:26,054 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=15 2024-11-15T08:26:26,054 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:26:26,054 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0bc57f31f7ca6dcaef0825646e63127d in 488 msec 2024-11-15T08:26:26,056 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:26:26,058 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:26:26,058 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-11-15T08:26:26,060 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-11-15T08:26:26,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741854_1030 (size=627) 2024-11-15T08:26:26,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741854_1030 (size=627) 2024-11-15T08:26:26,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741854_1030 (size=627) 2024-11-15T08:26:26,087 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:26:26,096 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:26:26,097 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-15T08:26:26,100 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:26:26,100 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-15T08:26:26,102 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 584 msec 2024-11-15T08:26:26,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-15T08:26:26,147 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-15T08:26:26,148 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659186147 2024-11-15T08:26:26,148 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:44023, tgtDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659186147, rawTgtDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659186147, srcFsUri=hdfs://localhost:44023, srcDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:26:26,234 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44023, inputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:26:26,234 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659186147, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659186147/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-15T08:26:26,317 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status 
and integrity. 2024-11-15T08:26:26,339 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659186147/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-15T08:26:26,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741855_1031 (size=627) 2024-11-15T08:26:26,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741855_1031 (size=627) 2024-11-15T08:26:26,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741855_1031 (size=627) 2024-11-15T08:26:26,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741856_1032 (size=162) 2024-11-15T08:26:26,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741856_1032 (size=162) 2024-11-15T08:26:26,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741856_1032 (size=162) 2024-11-15T08:26:26,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741857_1033 (size=154) 2024-11-15T08:26:26,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741857_1033 (size=154) 2024-11-15T08:26:26,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741857_1033 (size=154) 2024-11-15T08:26:26,428 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:26,429 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:26,429 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:27,608 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-18273636991880479630.jar 2024-11-15T08:26:27,609 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:27,609 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:27,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-9484036858892160077.jar 2024-11-15T08:26:27,692 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:27,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:27,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:27,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:27,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:27,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:27,695 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T08:26:27,695 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T08:26:27,695 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T08:26:27,696 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 
2024-11-15T08:26:27,696 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T08:26:27,696 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T08:26:27,697 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T08:26:27,697 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-15T08:26:27,697 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T08:26:27,698 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T08:26:27,698 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T08:26:27,700 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:26:27,701 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:26:27,701 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:26:27,701 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:26:27,702 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-15T08:26:27,702 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:26:27,702 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:26:27,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741858_1034 (size=131440) 2024-11-15T08:26:27,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741858_1034 (size=131440) 2024-11-15T08:26:27,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741858_1034 (size=131440) 2024-11-15T08:26:27,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741859_1035 (size=4188619) 2024-11-15T08:26:27,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741859_1035 (size=4188619) 2024-11-15T08:26:27,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741859_1035 (size=4188619) 2024-11-15T08:26:27,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741860_1036 (size=1323991) 2024-11-15T08:26:27,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741860_1036 (size=1323991) 2024-11-15T08:26:27,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741860_1036 (size=1323991) 2024-11-15T08:26:27,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741861_1037 (size=903738) 2024-11-15T08:26:27,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741861_1037 (size=903738) 2024-11-15T08:26:27,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741861_1037 (size=903738) 2024-11-15T08:26:28,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741862_1038 (size=8360083) 2024-11-15T08:26:28,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741862_1038 (size=8360083) 2024-11-15T08:26:28,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741862_1038 (size=8360083) 2024-11-15T08:26:28,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741863_1039 (size=1877034) 
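[Editor's note] The long run of "For class ..., using jar ..." entries above is TableMapReduceUtil resolving which jars to ship with the export job's distributed cache. A small sketch of the call that produces this resolution follows, assuming a plain MapReduce Job object is being prepared; the job name below is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-deps-sketch"); // hypothetical job name
    // For each class the job needs, locate (or package) its containing jar and add it to
    // the job's classpath; this is what emits the "For class ..., using jar ..." lines.
    TableMapReduceUtil.addDependencyJars(job);
  }
}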
2024-11-15T08:26:28,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741863_1039 (size=1877034) 2024-11-15T08:26:28,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741863_1039 (size=1877034) 2024-11-15T08:26:28,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741864_1040 (size=77835) 2024-11-15T08:26:28,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741864_1040 (size=77835) 2024-11-15T08:26:28,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741864_1040 (size=77835) 2024-11-15T08:26:28,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741865_1041 (size=30949) 2024-11-15T08:26:28,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741865_1041 (size=30949) 2024-11-15T08:26:28,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741865_1041 (size=30949) 2024-11-15T08:26:28,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741866_1042 (size=1597327) 2024-11-15T08:26:28,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741866_1042 (size=1597327) 2024-11-15T08:26:28,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741866_1042 (size=1597327) 2024-11-15T08:26:28,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741867_1043 (size=4695811) 2024-11-15T08:26:28,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741867_1043 (size=4695811) 2024-11-15T08:26:28,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741867_1043 (size=4695811) 2024-11-15T08:26:28,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741868_1044 (size=232957) 2024-11-15T08:26:28,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741868_1044 (size=232957) 2024-11-15T08:26:28,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741868_1044 (size=232957) 2024-11-15T08:26:28,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741869_1045 (size=127628) 2024-11-15T08:26:28,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741869_1045 (size=127628) 2024-11-15T08:26:28,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741869_1045 
(size=127628) 2024-11-15T08:26:28,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741870_1046 (size=20406) 2024-11-15T08:26:28,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741870_1046 (size=20406) 2024-11-15T08:26:28,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741870_1046 (size=20406) 2024-11-15T08:26:28,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741871_1047 (size=5175431) 2024-11-15T08:26:28,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741871_1047 (size=5175431) 2024-11-15T08:26:28,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741871_1047 (size=5175431) 2024-11-15T08:26:28,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741872_1048 (size=440656) 2024-11-15T08:26:28,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741872_1048 (size=440656) 2024-11-15T08:26:28,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741872_1048 (size=440656) 2024-11-15T08:26:28,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741873_1049 (size=217634) 2024-11-15T08:26:28,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741873_1049 (size=217634) 2024-11-15T08:26:28,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741873_1049 (size=217634) 2024-11-15T08:26:28,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741874_1050 (size=1832290) 2024-11-15T08:26:28,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741874_1050 (size=1832290) 2024-11-15T08:26:28,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741874_1050 (size=1832290) 2024-11-15T08:26:28,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741875_1051 (size=322274) 2024-11-15T08:26:28,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741875_1051 (size=322274) 2024-11-15T08:26:28,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741875_1051 (size=322274) 2024-11-15T08:26:28,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741876_1052 (size=503880) 2024-11-15T08:26:28,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to 
blk_1073741876_1052 (size=503880) 2024-11-15T08:26:28,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741876_1052 (size=503880) 2024-11-15T08:26:28,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741877_1053 (size=29229) 2024-11-15T08:26:28,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741877_1053 (size=29229) 2024-11-15T08:26:28,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741877_1053 (size=29229) 2024-11-15T08:26:28,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741878_1054 (size=6424751) 2024-11-15T08:26:28,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741878_1054 (size=6424751) 2024-11-15T08:26:28,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741878_1054 (size=6424751) 2024-11-15T08:26:28,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741879_1055 (size=24096) 2024-11-15T08:26:28,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741879_1055 (size=24096) 2024-11-15T08:26:28,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741879_1055 (size=24096) 2024-11-15T08:26:28,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741880_1056 (size=111872) 2024-11-15T08:26:28,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741880_1056 (size=111872) 2024-11-15T08:26:28,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741880_1056 (size=111872) 2024-11-15T08:26:28,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741881_1057 (size=45609) 2024-11-15T08:26:28,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741881_1057 (size=45609) 2024-11-15T08:26:28,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741881_1057 (size=45609) 2024-11-15T08:26:28,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741882_1058 (size=136454) 2024-11-15T08:26:28,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741882_1058 (size=136454) 2024-11-15T08:26:28,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741882_1058 (size=136454) 2024-11-15T08:26:28,657 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. 
See Job or Job#setJar(String). 2024-11-15T08:26:28,663 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-11-15T08:26:28,676 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-15T08:26:28,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741883_1059 (size=342) 2024-11-15T08:26:28,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741883_1059 (size=342) 2024-11-15T08:26:28,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741883_1059 (size=342) 2024-11-15T08:26:29,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741884_1060 (size=15) 2024-11-15T08:26:29,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741884_1060 (size=15) 2024-11-15T08:26:29,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741884_1060 (size=15) 2024-11-15T08:26:29,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741885_1061 (size=303737) 2024-11-15T08:26:29,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741885_1061 (size=303737) 2024-11-15T08:26:29,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741885_1061 (size=303737) 2024-11-15T08:26:29,869 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T08:26:29,910 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T08:26:29,910 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T08:26:30,465 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0001_000001 (auth:SIMPLE) from 127.0.0.1:35492 2024-11-15T08:26:39,867 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0001_000001 (auth:SIMPLE) from 127.0.0.1:59964 2024-11-15T08:26:40,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741886_1062 (size=349387) 2024-11-15T08:26:40,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741886_1062 (size=349387) 2024-11-15T08:26:40,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741886_1062 (size=349387) 2024-11-15T08:26:42,159 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0001_000001 (auth:SIMPLE) from 127.0.0.1:41242 2024-11-15T08:26:43,476 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T08:26:47,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741887_1063 (size=8324) 2024-11-15T08:26:47,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741887_1063 (size=8324) 2024-11-15T08:26:47,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741887_1063 (size=8324) 2024-11-15T08:26:47,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741888_1064 (size=5286) 2024-11-15T08:26:47,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741888_1064 (size=5286) 2024-11-15T08:26:47,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741888_1064 (size=5286) 2024-11-15T08:26:47,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741889_1065 (size=17419) 2024-11-15T08:26:47,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741889_1065 (size=17419) 2024-11-15T08:26:47,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741889_1065 (size=17419) 2024-11-15T08:26:47,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741890_1066 (size=464) 2024-11-15T08:26:47,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741890_1066 (size=464) 2024-11-15T08:26:47,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741890_1066 (size=464) 2024-11-15T08:26:47,765 WARN [ContainersLauncher 
#1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_0/usercache/jenkins/appcache/application_1731659181416_0001/container_1731659181416_0001_01_000002/launch_container.sh] 2024-11-15T08:26:47,765 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_0/usercache/jenkins/appcache/application_1731659181416_0001/container_1731659181416_0001_01_000002/container_tokens] 2024-11-15T08:26:47,765 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_0/usercache/jenkins/appcache/application_1731659181416_0001/container_1731659181416_0001_01_000002/sysfs] 2024-11-15T08:26:47,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741891_1067 (size=17419) 2024-11-15T08:26:47,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741891_1067 (size=17419) 2024-11-15T08:26:47,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741891_1067 (size=17419) 2024-11-15T08:26:47,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741892_1068 (size=349387) 2024-11-15T08:26:47,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741892_1068 (size=349387) 2024-11-15T08:26:47,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741892_1068 (size=349387) 2024-11-15T08:26:47,999 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0001_000001 (auth:SIMPLE) from 127.0.0.1:41250 2024-11-15T08:26:49,304 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-15T08:26:49,305 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
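For orientation, the MapReduce job traced above is the snapshot export itself. A minimal sketch of driving such an export programmatically is shown below; it is illustrative only and not taken from the test source. The snapshot name and target name are the ones recorded in the log, the destination URI is a placeholder, and the use of Hadoop's ToolRunner together with the --snapshot/--copy-to/--target options is an assumption based on the ExportSnapshot tool's usual command-line interface.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Snapshot and target names taken from the log; the destination is a placeholder.
        String[] toolArgs = {
            "--snapshot", "snaptb0-testExportWithTargetName",
            "--copy-to", "hdfs://<namenode>/<export-dir>",   // placeholder destination
            "--target", "testExportWithTargetName"
        };
        // Assumes ExportSnapshot is runnable as a Hadoop Tool, which is how it is normally invoked.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), toolArgs);
        System.exit(rc);
      }
    }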
2024-11-15T08:26:49,323 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: testExportWithTargetName 2024-11-15T08:26:49,323 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-15T08:26:49,324 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-15T08:26:49,325 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-15T08:26:49,325 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-11-15T08:26:49,325 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-11-15T08:26:49,325 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659186147/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659186147/.hbase-snapshot/testExportWithTargetName 2024-11-15T08:26:49,326 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659186147/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-11-15T08:26:49,326 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659186147/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-11-15T08:26:49,358 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-11-15T08:26:49,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-11-15T08:26:49,371 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659209371"}]},"ts":"1731659209371"} 2024-11-15T08:26:49,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-15T08:26:49,374 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-11-15T08:26:49,374 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-11-15T08:26:49,377 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=19, ppid=18, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-11-15T08:26:49,384 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=0bc57f31f7ca6dcaef0825646e63127d, UNASSIGN}, {pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e79cbe27a4edc1b677a5fd9e92c80f0b, UNASSIGN}] 2024-11-15T08:26:49,386 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e79cbe27a4edc1b677a5fd9e92c80f0b, UNASSIGN 2024-11-15T08:26:49,387 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=0bc57f31f7ca6dcaef0825646e63127d, UNASSIGN 2024-11-15T08:26:49,389 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=0bc57f31f7ca6dcaef0825646e63127d, regionState=CLOSING, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:26:49,389 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=21 updating hbase:meta row=e79cbe27a4edc1b677a5fd9e92c80f0b, regionState=CLOSING, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:26:49,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=21, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e79cbe27a4edc1b677a5fd9e92c80f0b, UNASSIGN because future has completed 2024-11-15T08:26:49,394 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:26:49,394 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE, hasLock=false; CloseRegionProcedure e79cbe27a4edc1b677a5fd9e92c80f0b, server=48e8ad8dc936,37511,1731659175104}] 2024-11-15T08:26:49,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=0bc57f31f7ca6dcaef0825646e63127d, UNASSIGN because future has completed 2024-11-15T08:26:49,408 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:26:49,409 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=20, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0bc57f31f7ca6dcaef0825646e63127d, server=48e8ad8dc936,34555,1731659175344}] 2024-11-15T08:26:49,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-15T08:26:49,566 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] 
handler.UnassignRegionHandler(122): Close e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:49,566 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:26:49,567 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1722): Closing e79cbe27a4edc1b677a5fd9e92c80f0b, disabling compactions & flushes 2024-11-15T08:26:49,567 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. 2024-11-15T08:26:49,567 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. 2024-11-15T08:26:49,568 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. after waiting 0 ms 2024-11-15T08:26:49,568 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. 2024-11-15T08:26:49,570 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close 0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:49,570 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:26:49,570 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing 0bc57f31f7ca6dcaef0825646e63127d, disabling compactions & flushes 2024-11-15T08:26:49,570 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. 2024-11-15T08:26:49,570 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. 2024-11-15T08:26:49,570 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. after waiting 0 ms 2024-11-15T08:26:49,570 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. 
2024-11-15T08:26:49,596 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T08:26:49,610 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:26:49,610 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d. 2024-11-15T08:26:49,611 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for 0bc57f31f7ca6dcaef0825646e63127d: Waiting for close lock at 1731659209570Running coprocessor pre-close hooks at 1731659209570Disabling compacts and flushes for region at 1731659209570Disabling writes for close at 1731659209570Writing region close event to WAL at 1731659209577 (+7 ms)Running coprocessor post-close hooks at 1731659209598 (+21 ms)Closed at 1731659209610 (+12 ms) 2024-11-15T08:26:49,611 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T08:26:49,615 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:26:49,615 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b. 
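The DisableTableProcedure chain above (pid=18 with subprocedures 19-23) is what the master runs when a client asks for the table to be disabled. A minimal sketch of the equivalent client-side call through the standard Admin API follows; it is not the test's own code, and the configuration is assumed to come from an hbase-site.xml on the classpath rather than anything stated in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportWithTargetName"); // name from the log
          if (admin.isTableEnabled(tn)) {
            // Blocks until the master's disable procedure (region UNASSIGN/close included) completes,
            // which is the pid=18..23 activity visible in the surrounding log entries.
            admin.disableTable(tn);
          }
        }
      }
    }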
2024-11-15T08:26:49,615 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] regionserver.HRegion(1676): Region close journal for e79cbe27a4edc1b677a5fd9e92c80f0b: Waiting for close lock at 1731659209567Running coprocessor pre-close hooks at 1731659209567Disabling compacts and flushes for region at 1731659209567Disabling writes for close at 1731659209568 (+1 ms)Writing region close event to WAL at 1731659209582 (+14 ms)Running coprocessor post-close hooks at 1731659209614 (+32 ms)Closed at 1731659209615 (+1 ms) 2024-11-15T08:26:49,618 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=20 updating hbase:meta row=0bc57f31f7ca6dcaef0825646e63127d, regionState=CLOSED 2024-11-15T08:26:49,621 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=22}] handler.UnassignRegionHandler(157): Closed e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:49,621 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed 0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:49,622 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=20, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0bc57f31f7ca6dcaef0825646e63127d, server=48e8ad8dc936,34555,1731659175344 because future has completed 2024-11-15T08:26:49,623 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=21 updating hbase:meta row=e79cbe27a4edc1b677a5fd9e92c80f0b, regionState=CLOSED 2024-11-15T08:26:49,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE, hasLock=false; CloseRegionProcedure e79cbe27a4edc1b677a5fd9e92c80f0b, server=48e8ad8dc936,37511,1731659175104 because future has completed 2024-11-15T08:26:49,631 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=20 2024-11-15T08:26:49,632 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=20, state=SUCCESS, hasLock=false; CloseRegionProcedure 0bc57f31f7ca6dcaef0825646e63127d, server=48e8ad8dc936,34555,1731659175344 in 216 msec 2024-11-15T08:26:49,635 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-11-15T08:26:49,635 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; CloseRegionProcedure e79cbe27a4edc1b677a5fd9e92c80f0b, server=48e8ad8dc936,37511,1731659175104 in 235 msec 2024-11-15T08:26:49,638 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=0bc57f31f7ca6dcaef0825646e63127d, UNASSIGN in 248 msec 2024-11-15T08:26:49,647 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=21, resume processing ppid=19 2024-11-15T08:26:49,647 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, ppid=19, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=e79cbe27a4edc1b677a5fd9e92c80f0b, UNASSIGN in 251 msec 2024-11-15T08:26:49,653 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-11-15T08:26:49,653 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): 
Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 271 msec 2024-11-15T08:26:49,656 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659209655"}]},"ts":"1731659209655"} 2024-11-15T08:26:49,662 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-11-15T08:26:49,662 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-11-15T08:26:49,667 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 304 msec 2024-11-15T08:26:49,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-15T08:26:49,687 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-15T08:26:49,692 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-11-15T08:26:49,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=24, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-15T08:26:49,702 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=24, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-15T08:26:49,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-11-15T08:26:49,709 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=24, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-15T08:26:49,715 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-11-15T08:26:49,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-15T08:26:49,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-15T08:26:49,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-15T08:26:49,722 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, 
quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-15T08:26:49,723 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-15T08:26:49,723 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-15T08:26:49,723 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-15T08:26:49,723 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-15T08:26:49,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-15T08:26:49,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-15T08:26:49,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-15T08:26:49,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:26:49,724 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:26:49,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-15T08:26:49,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:26:49,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:26:49,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=24 2024-11-15T08:26:49,728 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:49,738 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d/recovered.edits] 2024-11-15T08:26:49,744 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:49,752 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b/recovered.edits] 2024-11-15T08:26:49,760 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d/cf/3b4ecc3b44004ac88d2edb5d9e60720a to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d/cf/3b4ecc3b44004ac88d2edb5d9e60720a 2024-11-15T08:26:49,763 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b/cf/f8d552947916490ea11862c106174d87 to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b/cf/f8d552947916490ea11862c106174d87 2024-11-15T08:26:49,768 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d/recovered.edits/9.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d/recovered.edits/9.seqid 2024-11-15T08:26:49,772 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/0bc57f31f7ca6dcaef0825646e63127d 2024-11-15T08:26:49,772 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b/recovered.edits/9.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b/recovered.edits/9.seqid 2024-11-15T08:26:49,773 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithTargetName/e79cbe27a4edc1b677a5fd9e92c80f0b 2024-11-15T08:26:49,773 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-11-15T08:26:49,778 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=24, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-15T08:26:49,784 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40637 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-15T08:26:49,790 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-11-15T08:26:49,798 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-11-15T08:26:49,801 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=24, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-15T08:26:49,802 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 2024-11-15T08:26:49,802 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659209802"}]},"ts":"9223372036854775807"} 2024-11-15T08:26:49,802 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659209802"}]},"ts":"9223372036854775807"} 2024-11-15T08:26:49,809 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T08:26:49,809 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 0bc57f31f7ca6dcaef0825646e63127d, NAME => 'testtb-testExportWithTargetName,,1731659183840.0bc57f31f7ca6dcaef0825646e63127d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => e79cbe27a4edc1b677a5fd9e92c80f0b, NAME => 'testtb-testExportWithTargetName,1,1731659183840.e79cbe27a4edc1b677a5fd9e92c80f0b.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T08:26:49,809 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 
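With the regions archived, the remaining entries show the table delete completing (pid=24) and the two test snapshots being removed. A hedged sketch of the equivalent client calls is below; the connection setup is again an assumption, while the table and snapshot names are the ones logged.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CleanupSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportWithTargetName");
          // The table must already be disabled; the master then runs DeleteTableProcedure (pid=24 in the log).
          admin.deleteTable(tn);
          // Matches the two "delete name: ..." snapshot requests handled by MasterRpcServices further on.
          admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
          admin.deleteSnapshot("snaptb0-testExportWithTargetName");
        }
      }
    }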
2024-11-15T08:26:49,810 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731659209810"}]},"ts":"9223372036854775807"} 2024-11-15T08:26:49,814 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-11-15T08:26:49,819 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=24, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-15T08:26:49,822 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 126 msec 2024-11-15T08:26:49,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=24 2024-11-15T08:26:49,837 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-11-15T08:26:49,837 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-15T08:26:49,860 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-11-15T08:26:49,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-11-15T08:26:49,866 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-11-15T08:26:49,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-11-15T08:26:49,909 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=771 (was 720) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) 
java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:37301 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_907030716_1 at /127.0.0.1:55368 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37301 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2123625332_22 at /127.0.0.1:54612 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:37113 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2123625332_22 at /127.0.0.1:55400 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1254 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43321 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2123625332_22 at /127.0.0.1:43126 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37113 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 3662) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=802 (was 780) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=379 (was 213) - SystemLoadAverage LEAK? 
-, ProcessCount=19 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=1680 (was 4367) 2024-11-15T08:26:49,910 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=771 is superior to 500 2024-11-15T08:26:49,937 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=771, OpenFileDescriptor=802, MaxFileDescriptor=1048576, SystemLoadAverage=379, ProcessCount=19, AvailableMemoryMB=1679 2024-11-15T08:26:49,938 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=771 is superior to 500 2024-11-15T08:26:49,941 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T08:26:49,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=25, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-11-15T08:26:49,946 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T08:26:49,946 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:49,946 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 25 2024-11-15T08:26:49,948 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T08:26:49,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-15T08:26:49,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741893_1069 (size=404) 2024-11-15T08:26:49,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741893_1069 (size=404) 2024-11-15T08:26:49,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741893_1069 (size=404) 2024-11-15T08:26:50,002 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 215aac9a0193d4f77a31bd916345d889, NAME => 'testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:26:50,015 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => c5fbc719b9109b686ccc0c7d4ece1343, NAME => 'testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:26:50,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-15T08:26:50,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741894_1070 (size=65) 2024-11-15T08:26:50,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741894_1070 (size=65) 2024-11-15T08:26:50,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741894_1070 (size=65) 2024-11-15T08:26:50,082 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:26:50,082 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 215aac9a0193d4f77a31bd916345d889, disabling compactions & flushes 2024-11-15T08:26:50,082 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. 2024-11-15T08:26:50,082 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. 2024-11-15T08:26:50,082 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. after waiting 0 ms 2024-11-15T08:26:50,082 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. 2024-11-15T08:26:50,082 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. 
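The create request logged above (table 'testtb-testExportWithResetTtl', a single 'cf' family with VERSIONS => '1', REGION_REPLICATION => '1', and two regions split at rowkey '1') corresponds to an ordinary HBase Admin call. Below is a minimal Java sketch of an equivalent client-side request; the class name and the connection setup are illustrative assumptions, not taken from this log, and it presumes an hbase-site.xml for the target cluster on the classpath.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateExportTestTable {                            // illustrative class name
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();           // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportWithResetTtl"))
              .setRegionReplication(1)                               // REGION_REPLICATION => '1'
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)                                 // VERSIONS => '1'
                  .build())
              .build();
          // Two regions, matching STARTKEY ''/ENDKEY '1' and STARTKEY '1'/ENDKEY '' in the log.
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(desc, splitKeys);
        }
      }
    }

On the server side this is what shows up as the pid=25 CreateTableProcedure: the master walks the states seen in the log (CREATE_TABLE_PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS, and so on) while the client's RPC handler polls "Checking to see if procedure is done pid=25".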
2024-11-15T08:26:50,082 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 215aac9a0193d4f77a31bd916345d889: Waiting for close lock at 1731659210082Disabling compacts and flushes for region at 1731659210082Disabling writes for close at 1731659210082Writing region close event to WAL at 1731659210082Closed at 1731659210082 2024-11-15T08:26:50,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741895_1071 (size=65) 2024-11-15T08:26:50,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741895_1071 (size=65) 2024-11-15T08:26:50,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741895_1071 (size=65) 2024-11-15T08:26:50,088 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:26:50,088 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing c5fbc719b9109b686ccc0c7d4ece1343, disabling compactions & flushes 2024-11-15T08:26:50,088 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. 2024-11-15T08:26:50,088 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. 2024-11-15T08:26:50,088 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. after waiting 0 ms 2024-11-15T08:26:50,088 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. 2024-11-15T08:26:50,088 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. 
2024-11-15T08:26:50,088 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for c5fbc719b9109b686ccc0c7d4ece1343: Waiting for close lock at 1731659210088Disabling compacts and flushes for region at 1731659210088Disabling writes for close at 1731659210088Writing region close event to WAL at 1731659210088Closed at 1731659210088 2024-11-15T08:26:50,091 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T08:26:50,091 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731659210091"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659210091"}]},"ts":"1731659210091"} 2024-11-15T08:26:50,091 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731659210091"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659210091"}]},"ts":"1731659210091"} 2024-11-15T08:26:50,096 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T08:26:50,097 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T08:26:50,098 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659210097"}]},"ts":"1731659210097"} 2024-11-15T08:26:50,100 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-15T08:26:50,101 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {48e8ad8dc936=0} racks are {/default-rack=0} 2024-11-15T08:26:50,103 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T08:26:50,103 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T08:26:50,103 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T08:26:50,103 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T08:26:50,103 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T08:26:50,103 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T08:26:50,103 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T08:26:50,103 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T08:26:50,103 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T08:26:50,103 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T08:26:50,103 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=215aac9a0193d4f77a31bd916345d889, ASSIGN}, {pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c5fbc719b9109b686ccc0c7d4ece1343, ASSIGN}] 2024-11-15T08:26:50,105 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=215aac9a0193d4f77a31bd916345d889, ASSIGN 2024-11-15T08:26:50,107 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c5fbc719b9109b686ccc0c7d4ece1343, ASSIGN 2024-11-15T08:26:50,108 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=215aac9a0193d4f77a31bd916345d889, ASSIGN; state=OFFLINE, location=48e8ad8dc936,40637,1731659175269; forceNewPlan=false, retain=false 2024-11-15T08:26:50,110 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c5fbc719b9109b686ccc0c7d4ece1343, ASSIGN; state=OFFLINE, location=48e8ad8dc936,37511,1731659175104; forceNewPlan=false, retain=false 2024-11-15T08:26:50,259 INFO [48e8ad8dc936:35169 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-15T08:26:50,260 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=27 updating hbase:meta row=c5fbc719b9109b686ccc0c7d4ece1343, regionState=OPENING, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:26:50,260 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=26 updating hbase:meta row=215aac9a0193d4f77a31bd916345d889, regionState=OPENING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:26:50,264 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c5fbc719b9109b686ccc0c7d4ece1343, ASSIGN because future has completed 2024-11-15T08:26:50,264 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=28, ppid=27, state=RUNNABLE, hasLock=false; OpenRegionProcedure c5fbc719b9109b686ccc0c7d4ece1343, server=48e8ad8dc936,37511,1731659175104}] 2024-11-15T08:26:50,266 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=25, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=215aac9a0193d4f77a31bd916345d889, ASSIGN because future has completed 2024-11-15T08:26:50,267 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=26, state=RUNNABLE, hasLock=false; OpenRegionProcedure 215aac9a0193d4f77a31bd916345d889, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:26:50,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-15T08:26:50,427 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. 2024-11-15T08:26:50,427 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7752): Opening region: {ENCODED => c5fbc719b9109b686ccc0c7d4ece1343, NAME => 'testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T08:26:50,428 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. service=AccessControlService 2024-11-15T08:26:50,428 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T08:26:50,428 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:26:50,429 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:26:50,429 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7794): checking encryption for c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:26:50,429 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(7797): checking classloading for c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:26:50,438 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. 2024-11-15T08:26:50,438 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7752): Opening region: {ENCODED => 215aac9a0193d4f77a31bd916345d889, NAME => 'testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T08:26:50,439 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. service=AccessControlService 2024-11-15T08:26:50,440 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T08:26:50,440 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 215aac9a0193d4f77a31bd916345d889 2024-11-15T08:26:50,440 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:26:50,440 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7794): checking encryption for 215aac9a0193d4f77a31bd916345d889 2024-11-15T08:26:50,440 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(7797): checking classloading for 215aac9a0193d4f77a31bd916345d889 2024-11-15T08:26:50,445 INFO [StoreOpener-c5fbc719b9109b686ccc0c7d4ece1343-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:26:50,448 INFO [StoreOpener-c5fbc719b9109b686ccc0c7d4ece1343-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c5fbc719b9109b686ccc0c7d4ece1343 columnFamilyName cf 2024-11-15T08:26:50,448 DEBUG [StoreOpener-c5fbc719b9109b686ccc0c7d4ece1343-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:50,449 INFO [StoreOpener-c5fbc719b9109b686ccc0c7d4ece1343-1 {}] regionserver.HStore(327): Store=c5fbc719b9109b686ccc0c7d4ece1343/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:26:50,449 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1038): replaying wal for c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:26:50,450 INFO [StoreOpener-215aac9a0193d4f77a31bd916345d889-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 215aac9a0193d4f77a31bd916345d889 2024-11-15T08:26:50,451 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:26:50,451 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:26:50,452 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1048): stopping wal replay for c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:26:50,452 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1060): Cleaning up temporary data for c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:26:50,453 INFO [StoreOpener-215aac9a0193d4f77a31bd916345d889-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 215aac9a0193d4f77a31bd916345d889 columnFamilyName cf 2024-11-15T08:26:50,453 DEBUG [StoreOpener-215aac9a0193d4f77a31bd916345d889-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:50,460 INFO [StoreOpener-215aac9a0193d4f77a31bd916345d889-1 {}] regionserver.HStore(327): Store=215aac9a0193d4f77a31bd916345d889/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:26:50,460 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1093): writing seq id for c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:26:50,462 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1038): replaying wal for 215aac9a0193d4f77a31bd916345d889 2024-11-15T08:26:50,465 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889 2024-11-15T08:26:50,466 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889 2024-11-15T08:26:50,468 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1048): stopping wal replay for 215aac9a0193d4f77a31bd916345d889 2024-11-15T08:26:50,468 DEBUG 
[RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1060): Cleaning up temporary data for 215aac9a0193d4f77a31bd916345d889 2024-11-15T08:26:50,471 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1093): writing seq id for 215aac9a0193d4f77a31bd916345d889 2024-11-15T08:26:50,477 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:26:50,478 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1114): Opened c5fbc719b9109b686ccc0c7d4ece1343; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59439216, jitterRate=-0.11428666114807129}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:26:50,478 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:26:50,479 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegion(1006): Region open journal for c5fbc719b9109b686ccc0c7d4ece1343: Running coprocessor pre-open hook at 1731659210429Writing region info on filesystem at 1731659210429Initializing all the Stores at 1731659210432 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659210432Cleaning up temporary data from old regions at 1731659210452 (+20 ms)Running coprocessor post-open hooks at 1731659210478 (+26 ms)Region opened successfully at 1731659210479 (+1 ms) 2024-11-15T08:26:50,480 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343., pid=28, masterSystemTime=1731659210417 2024-11-15T08:26:50,485 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=27 updating hbase:meta row=c5fbc719b9109b686ccc0c7d4ece1343, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:26:50,486 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. 2024-11-15T08:26:50,486 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=28}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. 
2024-11-15T08:26:50,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=28, ppid=27, state=RUNNABLE, hasLock=false; OpenRegionProcedure c5fbc719b9109b686ccc0c7d4ece1343, server=48e8ad8dc936,37511,1731659175104 because future has completed 2024-11-15T08:26:50,491 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:26:50,493 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1114): Opened 215aac9a0193d4f77a31bd916345d889; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68117905, jitterRate=0.015035882592201233}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:26:50,493 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 215aac9a0193d4f77a31bd916345d889 2024-11-15T08:26:50,494 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegion(1006): Region open journal for 215aac9a0193d4f77a31bd916345d889: Running coprocessor pre-open hook at 1731659210440Writing region info on filesystem at 1731659210441 (+1 ms)Initializing all the Stores at 1731659210442 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659210442Cleaning up temporary data from old regions at 1731659210468 (+26 ms)Running coprocessor post-open hooks at 1731659210493 (+25 ms)Region opened successfully at 1731659210494 (+1 ms) 2024-11-15T08:26:50,496 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889., pid=29, masterSystemTime=1731659210422 2024-11-15T08:26:50,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=28, resume processing ppid=27 2024-11-15T08:26:50,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, ppid=27, state=SUCCESS, hasLock=false; OpenRegionProcedure c5fbc719b9109b686ccc0c7d4ece1343, server=48e8ad8dc936,37511,1731659175104 in 228 msec 2024-11-15T08:26:50,501 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. 2024-11-15T08:26:50,501 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=29}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. 
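At this point both regions have been opened, one on 48e8ad8dc936,37511,... and one on 48e8ad8dc936,40637,... A client can confirm the resulting assignment with a RegionLocator; the sketch below is not part of the test, just an illustrative way to list the encoded region names and hosting servers that appear in the records above (class name and connection setup are assumed).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class PrintRegionAssignments {                            // illustrative class name
      public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator =
                 connection.getRegionLocator(TableName.valueOf("testtb-testExportWithResetTtl"))) {
          for (HRegionLocation location : locator.getAllRegionLocations()) {
            // Prints encoded region name and hosting region server, e.g.
            // 215aac9a0193d4f77a31bd916345d889 -> 48e8ad8dc936,40637,1731659175269
            System.out.println(location.getRegion().getEncodedName() + " -> " + location.getServerName());
          }
        }
      }
    }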
2024-11-15T08:26:50,508 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c5fbc719b9109b686ccc0c7d4ece1343, ASSIGN in 397 msec 2024-11-15T08:26:50,509 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=26 updating hbase:meta row=215aac9a0193d4f77a31bd916345d889, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:26:50,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=29, ppid=26, state=RUNNABLE, hasLock=false; OpenRegionProcedure 215aac9a0193d4f77a31bd916345d889, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:26:50,519 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=26 2024-11-15T08:26:50,519 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=26, state=SUCCESS, hasLock=false; OpenRegionProcedure 215aac9a0193d4f77a31bd916345d889, server=48e8ad8dc936,40637,1731659175269 in 249 msec 2024-11-15T08:26:50,525 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T08:26:50,526 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659210526"}]},"ts":"1731659210526"} 2024-11-15T08:26:50,526 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=25 2024-11-15T08:26:50,527 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=25, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=215aac9a0193d4f77a31bd916345d889, ASSIGN in 416 msec 2024-11-15T08:26:50,529 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-15T08:26:50,531 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=25, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T08:26:50,532 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-11-15T08:26:50,537 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-15T08:26:50,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:26:50,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:26:50,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:26:50,539 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:26:50,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-15T08:26:50,544 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T08:26:50,544 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T08:26:50,544 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T08:26:50,544 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T08:26:50,547 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 603 msec 2024-11-15T08:26:50,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=25 2024-11-15T08:26:50,587 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-15T08:26:50,587 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-11-15T08:26:50,587 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:26:50,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-11-15T08:26:50,594 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:26:50,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithResetTtl assigned. 
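Up to this point the log covers a single CreateTableProcedure (pid=25): regions assigned and opened, the table marked ENABLED in hbase:meta, an owner permission ("jenkins: RWXCA") written to hbase:acl and fanned out to every watcher through ZooKeeper, and the test then waiting until all regions are assigned. A hedged sketch of the equivalent client-side calls, assuming the standard HBase 2.x Admin and AccessControlClient APIs; in this run the owner grant is written automatically by the AccessController, so the explicit grant below is purely illustrative:

    // Sketch only (assumptions: HBase 2.x Admin API; AccessController coprocessor enabled,
    // as the hbase:acl traffic above indicates).
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class CreateTableSketch {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Drives a CreateTableProcedure like pid=25; blocks until the table is ENABLED.
          admin.createTable(TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build());
          // Roughly equivalent to the "jenkins: RWXCA" row written to hbase:acl above.
          AccessControlClient.grant(conn, tn, "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }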
2024-11-15T08:26:50,595 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T08:26:50,599 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-15T08:26:50,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659210599 (current time:1731659210599). 2024-11-15T08:26:50,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:26:50,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-15T08:26:50,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:26:50,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66196cb0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:50,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:26:50,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:26:50,605 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:26:50,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:26:50,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:26:50,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@774aefdd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:50,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:26:50,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:26:50,610 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:50,615 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:35962, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:26:50,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cfebbd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:50,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:26:50,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:26:50,623 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:26:50,625 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35018, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:26:50,629 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 2024-11-15T08:26:50,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:26:50,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:50,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:50,630 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T08:26:50,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2602bc26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:50,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:26:50,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:26:50,632 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:26:50,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:26:50,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:26:50,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54649630, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:50,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:26:50,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:26:50,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:50,635 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35994, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:26:50,636 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46f83cf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:50,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:26:50,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:26:50,638 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:26:50,639 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35026, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
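The bursts of ClusterIdFetcher, ConnectionRegistry and "fetched meta region location" entries here are the normal bootstrap of short-lived client connections that snapshot validation opens and immediately closes on the master; the "Call stack" DEBUG dump simply records where the connection was closed. From application code that bootstrap is implicit in ConnectionFactory. A minimal sketch, assuming the standard 2.x client:

    // Sketch only: the cluster-id fetch (ConnectionRegistryService) and hbase:meta lookup
    // traced above happen inside ConnectionFactory/RegionLocator, not in user code.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ConnectionBootstrapSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // quorum etc. from hbase-site.xml
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testtb-testExportWithResetTtl"))) {
          // Triggers a meta lookup like the "fetched location ... seqNum=2" entries.
          System.out.println(locator.getRegionLocation(Bytes.toBytes("1"), false));
        }
      }
    }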
2024-11-15T08:26:50,642 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:26:50,644 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 2024-11-15T08:26:50,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:26:50,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:50,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:50,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 
{}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-15T08:26:50,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T08:26:50,647 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:26:50,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=30, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-15T08:26:50,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 30 2024-11-15T08:26:50,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-15T08:26:50,651 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:26:50,652 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:26:50,656 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:26:50,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741896_1072 (size=161) 2024-11-15T08:26:50,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741896_1072 (size=161) 2024-11-15T08:26:50,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741896_1072 (size=161) 2024-11-15T08:26:50,735 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:26:50,735 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 215aac9a0193d4f77a31bd916345d889}, {pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5fbc719b9109b686ccc0c7d4ece1343}] 2024-11-15T08:26:50,741 INFO [PEWorker-2 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 215aac9a0193d4f77a31bd916345d889 2024-11-15T08:26:50,742 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:26:50,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-15T08:26:50,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=32 2024-11-15T08:26:50,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40637 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=31 2024-11-15T08:26:50,897 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. 2024-11-15T08:26:50,897 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.HRegion(2603): Flush status journal for c5fbc719b9109b686ccc0c7d4ece1343: 2024-11-15T08:26:50,897 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-15T08:26:50,898 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-15T08:26:50,898 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:26:50,898 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T08:26:50,899 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. 2024-11-15T08:26:50,899 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.HRegion(2603): Flush status journal for 215aac9a0193d4f77a31bd916345d889: 2024-11-15T08:26:50,899 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. for emptySnaptb0-testExportWithResetTtl completed. 
2024-11-15T08:26:50,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-15T08:26:50,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:26:50,900 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T08:26:50,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741897_1073 (size=68) 2024-11-15T08:26:50,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741897_1073 (size=68) 2024-11-15T08:26:50,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741897_1073 (size=68) 2024-11-15T08:26:50,945 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. 2024-11-15T08:26:50,945 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=32}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=32 2024-11-15T08:26:50,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=32 2024-11-15T08:26:50,946 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:26:50,946 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=32, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:26:50,951 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=30, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c5fbc719b9109b686ccc0c7d4ece1343 in 213 msec 2024-11-15T08:26:50,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741898_1074 (size=68) 2024-11-15T08:26:50,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741898_1074 (size=68) 2024-11-15T08:26:50,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741898_1074 (size=68) 2024-11-15T08:26:50,960 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. 
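Everything from the first "snapshot request" entry onward traces one master-side SnapshotProcedure (pid=30) for a FLUSH-type snapshot of a still-empty table, which is why each SnapshotRegionProcedure stores only region-info and "[] hfiles"; the procedure finishes a few entries below. On the client it is a single blocking Admin call; a hedged sketch assuming the 2.x Admin API:

    // Sketch only: the client call behind "snapshot request for:{ ss=emptySnaptb0-... type=FLUSH }".
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class EmptySnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the SnapshotProcedure walks from SNAPSHOT_PREPARE
          // through SNAPSHOT_POST_OPERATION, as traced above and below.
          admin.snapshot("emptySnaptb0-testExportWithResetTtl",
              TableName.valueOf("testtb-testExportWithResetTtl"));
        }
      }
    }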
2024-11-15T08:26:50,960 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-15T08:26:50,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=31 2024-11-15T08:26:50,961 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 215aac9a0193d4f77a31bd916345d889 2024-11-15T08:26:50,961 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=31, ppid=30, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 215aac9a0193d4f77a31bd916345d889 2024-11-15T08:26:50,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-15T08:26:50,968 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=31, resume processing ppid=30 2024-11-15T08:26:50,968 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, ppid=30, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 215aac9a0193d4f77a31bd916345d889 in 228 msec 2024-11-15T08:26:50,968 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:26:50,970 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:26:50,975 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:26:50,975 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-11-15T08:26:50,976 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-11-15T08:26:51,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741899_1075 (size=543) 2024-11-15T08:26:51,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741899_1075 (size=543) 2024-11-15T08:26:51,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741899_1075 (size=543) 2024-11-15T08:26:51,029 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ 
ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:26:51,038 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:26:51,039 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-11-15T08:26:51,043 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=30, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:26:51,044 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 30 2024-11-15T08:26:51,046 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=30, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 398 msec 2024-11-15T08:26:51,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=30 2024-11-15T08:26:51,277 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-15T08:26:51,281 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='0a26f75ab6b248c02b9d9cce2866f5f14', locateType=CURRENT is [region=testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:26:51,283 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='187ca555f18a73e54ec566ea20cdbb317', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:26:51,284 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='23cd73e9f9d265b4b7569ea21c5714e4a', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:26:51,285 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='3259e3f23dc6fd7a28a8d141fc122a25d', locateType=CURRENT is 
[region=testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:26:51,286 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='49db5ec11533f8e8cf711d29b7e72a82a', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:26:51,287 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='5b56f292e53776462f98162cc990e8a16', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:26:51,289 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40637 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:26:51,290 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37511 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:26:51,293 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T08:26:51,295 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-15T08:26:51,295 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. 2024-11-15T08:26:51,295 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:26:51,297 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T08:26:51,302 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T08:26:51,308 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T08:26:51,311 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-15T08:26:51,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659211311 (current time:1731659211311). 
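The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." lines above are emitted because the test loads its rows with durability SKIP_WAL before requesting the second snapshot. A hedged sketch of such a write, assuming the standard Table/Put API (row key and value illustrative):

    // Sketch only: a SKIP_WAL put, which is what produces the
    // "Data may be lost in the event of a crash" log line above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithResetTtl"))) {
          Put put = new Put(Bytes.toBytes("row-0001"));             // row key illustrative
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL); // fast, but not crash-safe until flushed
          table.put(put);
        }
      }
    }

Because the upcoming FLUSH-type snapshot forces a memstore flush, the skipped WAL entries do not affect what ends up referenced by the snapshot.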
2024-11-15T08:26:51,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:26:51,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-15T08:26:51,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:26:51,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35514a47, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:51,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:26:51,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:26:51,313 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:26:51,313 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:26:51,313 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:26:51,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f594e7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:51,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:26:51,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:26:51,315 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:51,315 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33068, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:26:51,316 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32618dda, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:51,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:26:51,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:26:51,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:26:51,318 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41932, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:26:51,319 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 2024-11-15T08:26:51,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:26:51,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:51,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:51,320 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T08:26:51,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bd691fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:51,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:26:51,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:26:51,321 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:26:51,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:26:51,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:26:51,322 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6aa67760, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:51,322 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:26:51,322 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:26:51,322 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:51,323 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33076, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:26:51,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b7eb208, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:51,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:26:51,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:26:51,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:26:51,325 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41938, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T08:26:51,327 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:26:51,328 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 2024-11-15T08:26:51,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:26:51,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:51,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:51,329 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:26:51,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-15T08:26:51,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T08:26:51,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=33, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-15T08:26:51,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 33 2024-11-15T08:26:51,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-15T08:26:51,332 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:26:51,333 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:26:51,336 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:26:51,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741900_1076 (size=156) 2024-11-15T08:26:51,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741900_1076 (size=156) 2024-11-15T08:26:51,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741900_1076 (size=156) 2024-11-15T08:26:51,345 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:26:51,345 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 215aac9a0193d4f77a31bd916345d889}, {pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5fbc719b9109b686ccc0c7d4ece1343}] 2024-11-15T08:26:51,347 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 215aac9a0193d4f77a31bd916345d889 2024-11-15T08:26:51,349 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:26:51,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-15T08:26:51,501 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40637 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=34 2024-11-15T08:26:51,501 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. 2024-11-15T08:26:51,502 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(2902): Flushing 215aac9a0193d4f77a31bd916345d889 1/1 column families, dataSize=467 B heapSize=1.23 KB 2024-11-15T08:26:51,504 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=35 2024-11-15T08:26:51,505 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. 
2024-11-15T08:26:51,505 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(2902): Flushing c5fbc719b9109b686ccc0c7d4ece1343 1/1 column families, dataSize=2.80 KB heapSize=6.30 KB 2024-11-15T08:26:51,520 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889/.tmp/cf/eaa3389297a74605a8d179a2ad503e1e is 71, key is 01f6b42b13429b7a907ad2d62ef1c0df/cf:q/1731659211289/Put/seqid=0 2024-11-15T08:26:51,522 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343/.tmp/cf/2828c90a0da94a5593abb644470cdeee is 71, key is 112fd0ec9c5f171e3bcbdeaa132ba48f/cf:q/1731659211290/Put/seqid=0 2024-11-15T08:26:51,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741901_1077 (size=5566) 2024-11-15T08:26:51,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741901_1077 (size=5566) 2024-11-15T08:26:51,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741901_1077 (size=5566) 2024-11-15T08:26:51,581 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=467 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889/.tmp/cf/eaa3389297a74605a8d179a2ad503e1e 2024-11-15T08:26:51,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741902_1078 (size=8052) 2024-11-15T08:26:51,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741902_1078 (size=8052) 2024-11-15T08:26:51,588 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.80 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343/.tmp/cf/2828c90a0da94a5593abb644470cdeee 2024-11-15T08:26:51,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741902_1078 (size=8052) 2024-11-15T08:26:51,598 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343/.tmp/cf/2828c90a0da94a5593abb644470cdeee as 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343/cf/2828c90a0da94a5593abb644470cdeee 2024-11-15T08:26:51,608 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343/cf/2828c90a0da94a5593abb644470cdeee, entries=43, sequenceid=6, filesize=7.9 K 2024-11-15T08:26:51,611 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(3140): Finished flush of dataSize ~2.80 KB/2869, heapSize ~6.28 KB/6432, currentSize=0 B/0 for c5fbc719b9109b686ccc0c7d4ece1343 in 106ms, sequenceid=6, compaction requested=false 2024-11-15T08:26:51,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-11-15T08:26:51,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.HRegion(2603): Flush status journal for c5fbc719b9109b686ccc0c7d4ece1343: 2024-11-15T08:26:51,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. for snaptb0-testExportWithResetTtl completed. 2024-11-15T08:26:51,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-15T08:26:51,612 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:26:51,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343/cf/2828c90a0da94a5593abb644470cdeee] hfiles 2024-11-15T08:26:51,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343/cf/2828c90a0da94a5593abb644470cdeee for snapshot=snaptb0-testExportWithResetTtl 2024-11-15T08:26:51,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889/.tmp/cf/eaa3389297a74605a8d179a2ad503e1e as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889/cf/eaa3389297a74605a8d179a2ad503e1e 2024-11-15T08:26:51,637 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889/cf/eaa3389297a74605a8d179a2ad503e1e, entries=7, sequenceid=6, filesize=5.4 K 2024-11-15T08:26:51,639 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(3140): Finished flush of dataSize ~467 B/467, heapSize ~1.22 KB/1248, currentSize=0 B/0 for 215aac9a0193d4f77a31bd916345d889 in 136ms, sequenceid=6, compaction requested=false 2024-11-15T08:26:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.HRegion(2603): Flush status journal for 215aac9a0193d4f77a31bd916345d889: 2024-11-15T08:26:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. for snaptb0-testExportWithResetTtl completed. 2024-11-15T08:26:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-15T08:26:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:26:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889/cf/eaa3389297a74605a8d179a2ad503e1e] hfiles 2024-11-15T08:26:51,639 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889/cf/eaa3389297a74605a8d179a2ad503e1e for snapshot=snaptb0-testExportWithResetTtl 2024-11-15T08:26:51,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741903_1079 (size=107) 2024-11-15T08:26:51,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741903_1079 (size=107) 2024-11-15T08:26:51,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741903_1079 (size=107) 2024-11-15T08:26:51,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-15T08:26:51,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. 
2024-11-15T08:26:51,649 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35 2024-11-15T08:26:51,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=35 2024-11-15T08:26:51,650 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:26:51,650 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=35, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:26:51,653 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=33, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c5fbc719b9109b686ccc0c7d4ece1343 in 306 msec 2024-11-15T08:26:51,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741904_1080 (size=107) 2024-11-15T08:26:51,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741904_1080 (size=107) 2024-11-15T08:26:51,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741904_1080 (size=107) 2024-11-15T08:26:51,669 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. 
2024-11-15T08:26:51,669 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=34}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=34 2024-11-15T08:26:51,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=34 2024-11-15T08:26:51,670 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 215aac9a0193d4f77a31bd916345d889 2024-11-15T08:26:51,670 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=33, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 215aac9a0193d4f77a31bd916345d889 2024-11-15T08:26:51,676 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=33 2024-11-15T08:26:51,676 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=33, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 215aac9a0193d4f77a31bd916345d889 in 327 msec 2024-11-15T08:26:51,676 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:26:51,677 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:26:51,678 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:26:51,678 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-11-15T08:26:51,679 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-11-15T08:26:51,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741905_1081 (size=621) 2024-11-15T08:26:51,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741905_1081 (size=621) 2024-11-15T08:26:51,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741905_1081 (size=621) 2024-11-15T08:26:51,699 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:26:51,710 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): 
pid=33, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:26:51,711 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-11-15T08:26:51,713 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=33, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:26:51,713 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 33 2024-11-15T08:26:51,715 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=33, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 383 msec 2024-11-15T08:26:51,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=33 2024-11-15T08:26:51,956 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-15T08:26:51,958 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T08:26:51,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=36, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-11-15T08:26:51,960 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T08:26:51,961 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:51,961 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 36 2024-11-15T08:26:51,962 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, 
hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T08:26:51,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-15T08:26:51,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741906_1082 (size=397) 2024-11-15T08:26:51,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741906_1082 (size=397) 2024-11-15T08:26:51,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741906_1082 (size=397) 2024-11-15T08:26:51,976 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6347eed6bb6a47835827555fa53914aa, NAME => 'testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:26:51,977 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => e7d8881f335ac89ab35d99dc388ac927, NAME => 'testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:26:51,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741908_1084 (size=58) 2024-11-15T08:26:51,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741908_1084 (size=58) 2024-11-15T08:26:51,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741907_1083 (size=58) 2024-11-15T08:26:51,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741908_1084 (size=58) 2024-11-15T08:26:51,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741907_1083 (size=58) 2024-11-15T08:26:51,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741907_1083 (size=58) 2024-11-15T08:26:51,993 DEBUG 
[RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:26:51,993 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing e7d8881f335ac89ab35d99dc388ac927, disabling compactions & flushes 2024-11-15T08:26:51,993 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927. 2024-11-15T08:26:51,994 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927. 2024-11-15T08:26:51,994 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927. after waiting 0 ms 2024-11-15T08:26:51,994 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927. 2024-11-15T08:26:51,994 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927. 2024-11-15T08:26:51,994 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for e7d8881f335ac89ab35d99dc388ac927: Waiting for close lock at 1731659211993Disabling compacts and flushes for region at 1731659211993Disabling writes for close at 1731659211994 (+1 ms)Writing region close event to WAL at 1731659211994Closed at 1731659211994 2024-11-15T08:26:51,994 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:26:51,994 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 6347eed6bb6a47835827555fa53914aa, disabling compactions & flushes 2024-11-15T08:26:51,994 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. 2024-11-15T08:26:51,994 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. 2024-11-15T08:26:51,994 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. after waiting 0 ms 2024-11-15T08:26:51,994 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. 2024-11-15T08:26:51,994 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. 
2024-11-15T08:26:51,994 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6347eed6bb6a47835827555fa53914aa: Waiting for close lock at 1731659211994Disabling compacts and flushes for region at 1731659211994Disabling writes for close at 1731659211994Writing region close event to WAL at 1731659211994Closed at 1731659211994 2024-11-15T08:26:51,996 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T08:26:51,996 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1731659211996"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659211996"}]},"ts":"1731659211996"} 2024-11-15T08:26:51,997 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1731659211996"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659211996"}]},"ts":"1731659211996"} 2024-11-15T08:26:52,000 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T08:26:52,002 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T08:26:52,002 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659212002"}]},"ts":"1731659212002"} 2024-11-15T08:26:52,005 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-15T08:26:52,005 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {48e8ad8dc936=0} racks are {/default-rack=0} 2024-11-15T08:26:52,006 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T08:26:52,007 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T08:26:52,007 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T08:26:52,007 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T08:26:52,007 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T08:26:52,007 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T08:26:52,007 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T08:26:52,007 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T08:26:52,007 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T08:26:52,007 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T08:26:52,007 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, 
region=6347eed6bb6a47835827555fa53914aa, ASSIGN}, {pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=e7d8881f335ac89ab35d99dc388ac927, ASSIGN}] 2024-11-15T08:26:52,009 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=e7d8881f335ac89ab35d99dc388ac927, ASSIGN 2024-11-15T08:26:52,009 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=6347eed6bb6a47835827555fa53914aa, ASSIGN 2024-11-15T08:26:52,010 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=6347eed6bb6a47835827555fa53914aa, ASSIGN; state=OFFLINE, location=48e8ad8dc936,40637,1731659175269; forceNewPlan=false, retain=false 2024-11-15T08:26:52,010 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=e7d8881f335ac89ab35d99dc388ac927, ASSIGN; state=OFFLINE, location=48e8ad8dc936,34555,1731659175344; forceNewPlan=false, retain=false 2024-11-15T08:26:52,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-15T08:26:52,161 INFO [48e8ad8dc936:35169 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-15T08:26:52,161 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=38 updating hbase:meta row=e7d8881f335ac89ab35d99dc388ac927, regionState=OPENING, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:26:52,161 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=37 updating hbase:meta row=6347eed6bb6a47835827555fa53914aa, regionState=OPENING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:26:52,165 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=38, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=e7d8881f335ac89ab35d99dc388ac927, ASSIGN because future has completed 2024-11-15T08:26:52,165 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE, hasLock=false; OpenRegionProcedure e7d8881f335ac89ab35d99dc388ac927, server=48e8ad8dc936,34555,1731659175344}] 2024-11-15T08:26:52,166 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=37, ppid=36, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=6347eed6bb6a47835827555fa53914aa, ASSIGN because future has completed 2024-11-15T08:26:52,169 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=37, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6347eed6bb6a47835827555fa53914aa, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:26:52,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-15T08:26:52,325 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927. 2024-11-15T08:26:52,325 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7752): Opening region: {ENCODED => e7d8881f335ac89ab35d99dc388ac927, NAME => 'testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T08:26:52,325 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. 2024-11-15T08:26:52,325 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7752): Opening region: {ENCODED => 6347eed6bb6a47835827555fa53914aa, NAME => 'testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T08:26:52,325 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927. service=AccessControlService 2024-11-15T08:26:52,325 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. 
service=AccessControlService 2024-11-15T08:26:52,326 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T08:26:52,326 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T08:26:52,326 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl e7d8881f335ac89ab35d99dc388ac927 2024-11-15T08:26:52,326 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 6347eed6bb6a47835827555fa53914aa 2024-11-15T08:26:52,326 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:26:52,326 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:26:52,326 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7794): checking encryption for e7d8881f335ac89ab35d99dc388ac927 2024-11-15T08:26:52,326 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7794): checking encryption for 6347eed6bb6a47835827555fa53914aa 2024-11-15T08:26:52,326 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(7797): checking classloading for e7d8881f335ac89ab35d99dc388ac927 2024-11-15T08:26:52,326 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(7797): checking classloading for 6347eed6bb6a47835827555fa53914aa 2024-11-15T08:26:52,327 INFO [StoreOpener-e7d8881f335ac89ab35d99dc388ac927-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e7d8881f335ac89ab35d99dc388ac927 2024-11-15T08:26:52,328 INFO [StoreOpener-6347eed6bb6a47835827555fa53914aa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6347eed6bb6a47835827555fa53914aa 2024-11-15T08:26:52,329 INFO [StoreOpener-6347eed6bb6a47835827555fa53914aa-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); 
ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6347eed6bb6a47835827555fa53914aa columnFamilyName cf 2024-11-15T08:26:52,329 INFO [StoreOpener-e7d8881f335ac89ab35d99dc388ac927-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e7d8881f335ac89ab35d99dc388ac927 columnFamilyName cf 2024-11-15T08:26:52,329 DEBUG [StoreOpener-6347eed6bb6a47835827555fa53914aa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:52,329 DEBUG [StoreOpener-e7d8881f335ac89ab35d99dc388ac927-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:26:52,330 INFO [StoreOpener-6347eed6bb6a47835827555fa53914aa-1 {}] regionserver.HStore(327): Store=6347eed6bb6a47835827555fa53914aa/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:26:52,330 INFO [StoreOpener-e7d8881f335ac89ab35d99dc388ac927-1 {}] regionserver.HStore(327): Store=e7d8881f335ac89ab35d99dc388ac927/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:26:52,330 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1038): replaying wal for 6347eed6bb6a47835827555fa53914aa 2024-11-15T08:26:52,330 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1038): replaying wal for e7d8881f335ac89ab35d99dc388ac927 2024-11-15T08:26:52,331 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927 2024-11-15T08:26:52,331 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa 2024-11-15T08:26:52,331 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, 
pid=39}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927 2024-11-15T08:26:52,331 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa 2024-11-15T08:26:52,332 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1048): stopping wal replay for e7d8881f335ac89ab35d99dc388ac927 2024-11-15T08:26:52,332 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1048): stopping wal replay for 6347eed6bb6a47835827555fa53914aa 2024-11-15T08:26:52,332 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1060): Cleaning up temporary data for e7d8881f335ac89ab35d99dc388ac927 2024-11-15T08:26:52,332 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1060): Cleaning up temporary data for 6347eed6bb6a47835827555fa53914aa 2024-11-15T08:26:52,334 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1093): writing seq id for 6347eed6bb6a47835827555fa53914aa 2024-11-15T08:26:52,334 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1093): writing seq id for e7d8881f335ac89ab35d99dc388ac927 2024-11-15T08:26:52,336 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:26:52,337 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:26:52,337 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1114): Opened e7d8881f335ac89ab35d99dc388ac927; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67339075, jitterRate=0.003430411219596863}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:26:52,337 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1114): Opened 6347eed6bb6a47835827555fa53914aa; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67243906, jitterRate=0.002012282609939575}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:26:52,337 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1122): Running coprocessor post-open hooks for e7d8881f335ac89ab35d99dc388ac927 
2024-11-15T08:26:52,337 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6347eed6bb6a47835827555fa53914aa 2024-11-15T08:26:52,338 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegion(1006): Region open journal for e7d8881f335ac89ab35d99dc388ac927: Running coprocessor pre-open hook at 1731659212326Writing region info on filesystem at 1731659212326Initializing all the Stores at 1731659212327 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659212327Cleaning up temporary data from old regions at 1731659212332 (+5 ms)Running coprocessor post-open hooks at 1731659212337 (+5 ms)Region opened successfully at 1731659212338 (+1 ms) 2024-11-15T08:26:52,338 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegion(1006): Region open journal for 6347eed6bb6a47835827555fa53914aa: Running coprocessor pre-open hook at 1731659212326Writing region info on filesystem at 1731659212326Initializing all the Stores at 1731659212327 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659212327Cleaning up temporary data from old regions at 1731659212332 (+5 ms)Running coprocessor post-open hooks at 1731659212337 (+5 ms)Region opened successfully at 1731659212338 (+1 ms) 2024-11-15T08:26:52,339 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa., pid=40, masterSystemTime=1731659212322 2024-11-15T08:26:52,339 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927., pid=39, masterSystemTime=1731659212321 2024-11-15T08:26:52,341 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. 2024-11-15T08:26:52,341 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=40}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. 2024-11-15T08:26:52,342 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=37 updating hbase:meta row=6347eed6bb6a47835827555fa53914aa, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:26:52,343 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927. 
2024-11-15T08:26:52,343 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=39}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927. 2024-11-15T08:26:52,344 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=38 updating hbase:meta row=e7d8881f335ac89ab35d99dc388ac927, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:26:52,345 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=37, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6347eed6bb6a47835827555fa53914aa, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:26:52,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=39, ppid=38, state=RUNNABLE, hasLock=false; OpenRegionProcedure e7d8881f335ac89ab35d99dc388ac927, server=48e8ad8dc936,34555,1731659175344 because future has completed 2024-11-15T08:26:52,349 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=37 2024-11-15T08:26:52,349 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=37, state=SUCCESS, hasLock=false; OpenRegionProcedure 6347eed6bb6a47835827555fa53914aa, server=48e8ad8dc936,40637,1731659175269 in 178 msec 2024-11-15T08:26:52,351 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-11-15T08:26:52,351 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, ppid=36, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=6347eed6bb6a47835827555fa53914aa, ASSIGN in 342 msec 2024-11-15T08:26:52,352 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; OpenRegionProcedure e7d8881f335ac89ab35d99dc388ac927, server=48e8ad8dc936,34555,1731659175344 in 183 msec 2024-11-15T08:26:52,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=38, resume processing ppid=36 2024-11-15T08:26:52,354 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, ppid=36, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=e7d8881f335ac89ab35d99dc388ac927, ASSIGN in 345 msec 2024-11-15T08:26:52,355 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T08:26:52,355 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659212355"}]},"ts":"1731659212355"} 2024-11-15T08:26:52,357 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-15T08:26:52,358 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=36, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T08:26:52,358 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-11-15T08:26:52,362 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-15T08:26:52,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:26:52,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:26:52,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:26:52,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:26:52,527 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T08:26:52,527 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T08:26:52,528 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T08:26:52,528 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T08:26:52,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 568 msec 2024-11-15T08:26:52,533 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T08:26:52,533 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T08:26:52,536 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T08:26:52,536 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data 
PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T08:26:52,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=36 2024-11-15T08:26:52,587 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-11-15T08:26:52,587 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms 2024-11-15T08:26:52,587 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:26:52,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-11-15T08:26:52,596 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:26:52,596 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportWithResetTtl assigned. 2024-11-15T08:26:52,596 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T08:26:52,608 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='049a482a9c8bde47b2328c1a7b84ce2f0', locateType=CURRENT is [region=testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:26:52,609 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='17b038c1b1fae351eb4eff5b39f1b44f5', locateType=CURRENT is [region=testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927., hostname=48e8ad8dc936,34555,1731659175344, seqNum=2] 2024-11-15T08:26:52,611 DEBUG [Time-limited test {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='29613dd90b2d16e4d01d952a988e190cc', locateType=CURRENT is [region=testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927., hostname=48e8ad8dc936,34555,1731659175344, seqNum=2] 2024-11-15T08:26:52,631 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34555 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:26:52,634 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40637 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:26:52,635 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T08:26:52,641 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-15T08:26:52,641 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. 
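The HRegion(8528) warnings above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") come from the test loading rows with durability set to skip the write-ahead log, and the AsyncNonMetaRegionLocator entries show the client resolving which region server hosts each row. A minimal client-side sketch of that pattern, assuming the standard org.apache.hadoop.hbase.client API; the row key, family "cf" and qualifier "q" are taken from the log, while the value and class name are made up for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithoutWal {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testExportWithResetTtl");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             RegionLocator locator = conn.getRegionLocator(tn)) {
          byte[] row = Bytes.toBytes("049a482a9c8bde47b2328c1a7b84ce2f0");
          // Region lookup: corresponds to the "The fetched location of 'testExportWithResetTtl' ..." entries.
          System.out.println(locator.getRegionLocation(row));
          Put put = new Put(row)
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skipping the WAL is what triggers the "Data may be lost in the event of a crash" warning.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }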
2024-11-15T08:26:52,641 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:26:52,644 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T08:26:52,656 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T08:26:52,672 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-15T08:26:52,677 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-15T08:26:52,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659212677 (current time:1731659212677). 2024-11-15T08:26:52,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-15T08:26:52,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:26:52,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78195103, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:52,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:26:52,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:26:52,687 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:26:52,687 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:26:52,687 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:26:52,687 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71829b09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:52,687 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:26:52,688 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:26:52,688 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:52,690 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33090, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:26:52,691 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25a3f9cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:52,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:26:52,693 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:26:52,693 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:26:52,695 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41952, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:26:52,696 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 
2024-11-15T08:26:52,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:26:52,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:52,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:52,697 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:26:52,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a64650e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:52,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:26:52,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:26:52,701 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:26:52,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:26:52,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:26:52,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60feca40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:52,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:26:52,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:26:52,703 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:52,704 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33106, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:26:52,705 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7331b94b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:26:52,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:26:52,708 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:26:52,708 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:26:52,710 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41966, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:26:52,713 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:26:52,715 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 
2024-11-15T08:26:52,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:26:52,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:52,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:26:52,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-15T08:26:52,716 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:26:52,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
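The MasterRpcServices(1763) entry above records a client snapshot request of type FLUSH with ttl=100000 for testExportWithResetTtl, which the master validates (including the hbase:acl read shown in the call stack) before storing SnapshotProcedure pid=41 below. A hedged sketch of how such a request is issued from the client side, assuming the public Admin/SnapshotDescription API; the ttl property is left out here because the way it is supplied varies by client version:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // FLUSH-type snapshot: each region flushes its memstore first (the "Flushing ..." entries
          // below), then hfile references are added to the snapshot manifest.
          admin.snapshot(new SnapshotDescription("snaptb-testExportWithResetTtl",
              TableName.valueOf("testExportWithResetTtl"), SnapshotType.FLUSH));
        }
      }
    }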
2024-11-15T08:26:52,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=41, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-15T08:26:52,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 41 2024-11-15T08:26:52,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-15T08:26:52,722 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:26:52,724 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:26:52,727 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:26:52,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741909_1085 (size=143) 2024-11-15T08:26:52,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741909_1085 (size=143) 2024-11-15T08:26:52,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741909_1085 (size=143) 2024-11-15T08:26:52,785 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:26:52,786 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6347eed6bb6a47835827555fa53914aa}, {pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e7d8881f335ac89ab35d99dc388ac927}] 2024-11-15T08:26:52,787 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e7d8881f335ac89ab35d99dc388ac927 2024-11-15T08:26:52,787 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6347eed6bb6a47835827555fa53914aa 2024-11-15T08:26:52,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=41 2024-11-15T08:26:52,939 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40637 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=42 2024-11-15T08:26:52,939 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34555 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=43 2024-11-15T08:26:52,940 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. 2024-11-15T08:26:52,940 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927. 2024-11-15T08:26:52,940 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(2902): Flushing 6347eed6bb6a47835827555fa53914aa 1/1 column families, dataSize=333 B heapSize=976 B 2024-11-15T08:26:52,941 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(2902): Flushing e7d8881f335ac89ab35d99dc388ac927 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-11-15T08:26:52,962 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa/.tmp/cf/b220066fbb6a480f96a536970d28ba47 is 71, key is 00879ddb4bba602ef547357ba888bf7f/cf:q/1731659212633/Put/seqid=0 2024-11-15T08:26:52,966 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927/.tmp/cf/aa2698852cc642ad9431415b4b2ee5e8 is 71, key is 1bcbdbfb286c616b86d67932fcfdf9df/cf:q/1731659212631/Put/seqid=0 2024-11-15T08:26:53,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741910_1086 (size=5422) 2024-11-15T08:26:53,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741910_1086 (size=5422) 2024-11-15T08:26:53,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741910_1086 (size=5422) 2024-11-15T08:26:53,005 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa/.tmp/cf/b220066fbb6a480f96a536970d28ba47 2024-11-15T08:26:53,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to 
blk_1073741911_1087 (size=8190) 2024-11-15T08:26:53,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741911_1087 (size=8190) 2024-11-15T08:26:53,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741911_1087 (size=8190) 2024-11-15T08:26:53,010 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927/.tmp/cf/aa2698852cc642ad9431415b4b2ee5e8 2024-11-15T08:26:53,017 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa/.tmp/cf/b220066fbb6a480f96a536970d28ba47 as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa/cf/b220066fbb6a480f96a536970d28ba47 2024-11-15T08:26:53,021 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927/.tmp/cf/aa2698852cc642ad9431415b4b2ee5e8 as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927/cf/aa2698852cc642ad9431415b4b2ee5e8 2024-11-15T08:26:53,025 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa/cf/b220066fbb6a480f96a536970d28ba47, entries=5, sequenceid=5, filesize=5.3 K 2024-11-15T08:26:53,026 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 6347eed6bb6a47835827555fa53914aa in 86ms, sequenceid=5, compaction requested=false 2024-11-15T08:26:53,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-11-15T08:26:53,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.HRegion(2603): Flush status journal for 6347eed6bb6a47835827555fa53914aa: 2024-11-15T08:26:53,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. for snaptb-testExportWithResetTtl completed. 
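While the region servers flush and snapshot each region, the admin client keeps polling the master, which is what the recurring "Checking to see if procedure is done pid=41" entries are. A rough sketch of an equivalent polling loop over the public API, assuming a snapshot by this name has already been requested so Admin.isSnapshotFinished can be queried:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class WaitForSnapshot {
      public static void main(String[] args) throws Exception {
        SnapshotDescription snap = new SnapshotDescription("snaptb-testExportWithResetTtl",
            TableName.valueOf("testExportWithResetTtl"), SnapshotType.FLUSH);
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Mirrors the client-side "is procedure done" polling visible in the log.
          while (!admin.isSnapshotFinished(snap)) {
            Thread.sleep(200);
          }
          // Once finished, the snapshot shows up in listSnapshots().
          admin.listSnapshots().forEach(s -> System.out.println(s.getName()));
        }
      }
    }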
2024-11-15T08:26:53,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-15T08:26:53,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:26:53,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa/cf/b220066fbb6a480f96a536970d28ba47] hfiles 2024-11-15T08:26:53,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa/cf/b220066fbb6a480f96a536970d28ba47 for snapshot=snaptb-testExportWithResetTtl 2024-11-15T08:26:53,029 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927/cf/aa2698852cc642ad9431415b4b2ee5e8, entries=45, sequenceid=5, filesize=8.0 K 2024-11-15T08:26:53,030 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for e7d8881f335ac89ab35d99dc388ac927 in 90ms, sequenceid=5, compaction requested=false 2024-11-15T08:26:53,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.HRegion(2603): Flush status journal for e7d8881f335ac89ab35d99dc388ac927: 2024-11-15T08:26:53,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927. for snaptb-testExportWithResetTtl completed. 2024-11-15T08:26:53,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-15T08:26:53,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:26:53,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927/cf/aa2698852cc642ad9431415b4b2ee5e8] hfiles 2024-11-15T08:26:53,031 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927/cf/aa2698852cc642ad9431415b4b2ee5e8 for snapshot=snaptb-testExportWithResetTtl 2024-11-15T08:26:53,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-15T08:26:53,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741912_1088 (size=100) 2024-11-15T08:26:53,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741912_1088 (size=100) 2024-11-15T08:26:53,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741912_1088 (size=100) 2024-11-15T08:26:53,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. 
2024-11-15T08:26:53,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=42 2024-11-15T08:26:53,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=42 2024-11-15T08:26:53,063 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 6347eed6bb6a47835827555fa53914aa 2024-11-15T08:26:53,063 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=42, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6347eed6bb6a47835827555fa53914aa 2024-11-15T08:26:53,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741913_1089 (size=100) 2024-11-15T08:26:53,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741913_1089 (size=100) 2024-11-15T08:26:53,067 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6347eed6bb6a47835827555fa53914aa in 280 msec 2024-11-15T08:26:53,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741913_1089 (size=100) 2024-11-15T08:26:53,070 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927. 
2024-11-15T08:26:53,070 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=43}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=43 2024-11-15T08:26:53,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=43 2024-11-15T08:26:53,071 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region e7d8881f335ac89ab35d99dc388ac927 2024-11-15T08:26:53,071 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=43, ppid=41, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure e7d8881f335ac89ab35d99dc388ac927 2024-11-15T08:26:53,076 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=41 2024-11-15T08:26:53,076 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=41, state=SUCCESS, hasLock=false; SnapshotRegionProcedure e7d8881f335ac89ab35d99dc388ac927 in 288 msec 2024-11-15T08:26:53,076 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:26:53,077 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:26:53,078 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:26:53,078 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-11-15T08:26:53,079 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-15T08:26:53,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741914_1090 (size=600) 2024-11-15T08:26:53,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741914_1090 (size=600) 2024-11-15T08:26:53,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741914_1090 (size=600) 2024-11-15T08:26:53,107 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:26:53,116 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=41, 
state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:26:53,117 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-15T08:26:53,119 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=41, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:26:53,119 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 41 2024-11-15T08:26:53,121 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=41, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 402 msec 2024-11-15T08:26:53,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=41 2024-11-15T08:26:53,346 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-11-15T08:26:53,356 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659213356 2024-11-15T08:26:53,357 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:44023, tgtDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659213356, rawTgtDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659213356, srcFsUri=hdfs://localhost:44023, srcDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:26:53,391 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44023, inputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:26:53,391 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659213356, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659213356/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-15T08:26:53,394 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 
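From TestExportSnapshot(467/494) onward the test hands the completed snapshot to the ExportSnapshot tool, which verifies the source snapshot (including its expiration status, per ExportSnapshot(1095)) and then copies the manifest and hfiles to the destination file system. A sketch of driving the same export programmatically, assuming the documented -snapshot/-copy-to/-mappers options; the destination URI is the one printed in the log, and the mapper count is an arbitrary illustrative choice:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class RunExportSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly equivalent command line:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb-testExportWithResetTtl \
        //     -copy-to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659213356 \
        //     -mappers 2
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb-testExportWithResetTtl",
            "-copy-to",
            "hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659213356",
            "-mappers", "2"
        });
        System.exit(rc);
      }
    }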
2024-11-15T08:26:53,402 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659213356/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-15T08:26:53,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741915_1091 (size=143) 2024-11-15T08:26:53,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741915_1091 (size=143) 2024-11-15T08:26:53,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741915_1091 (size=143) 2024-11-15T08:26:53,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741916_1092 (size=600) 2024-11-15T08:26:53,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741916_1092 (size=600) 2024-11-15T08:26:53,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741916_1092 (size=600) 2024-11-15T08:26:53,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741917_1093 (size=141) 2024-11-15T08:26:53,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741917_1093 (size=141) 2024-11-15T08:26:53,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741917_1093 (size=141) 2024-11-15T08:26:53,455 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:53,455 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:53,455 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:54,113 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0001_000001 (auth:SIMPLE) from 127.0.0.1:55948 2024-11-15T08:26:54,126 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_0/usercache/jenkins/appcache/application_1731659181416_0001/container_1731659181416_0001_01_000001/launch_container.sh] 2024-11-15T08:26:54,126 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned 
false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_0/usercache/jenkins/appcache/application_1731659181416_0001/container_1731659181416_0001_01_000001/container_tokens] 2024-11-15T08:26:54,126 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_0/usercache/jenkins/appcache/application_1731659181416_0001/container_1731659181416_0001_01_000001/sysfs] 2024-11-15T08:26:54,678 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-11298863009804338755.jar 2024-11-15T08:26:54,679 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:54,679 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:54,777 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-1944086969246557047.jar 2024-11-15T08:26:54,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:54,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:54,779 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:54,779 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:54,780 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:54,780 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:26:54,780 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T08:26:54,781 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T08:26:54,781 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T08:26:54,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-15T08:26:54,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T08:26:54,782 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T08:26:54,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T08:26:54,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-15T08:26:54,783 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T08:26:54,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T08:26:54,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T08:26:54,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:26:54,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:26:54,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:26:54,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:26:54,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:26:54,786 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:26:54,787 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:26:54,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741918_1094 (size=131440) 2024-11-15T08:26:54,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741918_1094 (size=131440) 2024-11-15T08:26:54,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741918_1094 (size=131440) 2024-11-15T08:26:54,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741919_1095 (size=4188619) 2024-11-15T08:26:54,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741919_1095 (size=4188619) 2024-11-15T08:26:54,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741919_1095 (size=4188619) 2024-11-15T08:26:54,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741920_1096 (size=6424751) 
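The long run of TableMapReduceUtil(972) "For class X, using jar Y" entries above is the export job resolving every HBase/Hadoop dependency jar so they can be shipped with the MapReduce job; the addStoredBlock entries that follow are those jars being written into HDFS for the distributed cache. A small sketch of the same public call for a user job, assuming only that "tmpjars" is the Hadoop property where the resolved jars are recorded:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class ShipHBaseJars {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-like-job");
        // Resolves and attaches the hbase-client/hbase-common/shaded-thirdparty/zookeeper/metrics
        // jars enumerated in the log above to the job's distributed cache.
        TableMapReduceUtil.addDependencyJars(job);
        System.out.println(job.getConfiguration().get("tmpjars"));
      }
    }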
2024-11-15T08:26:54,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741920_1096 (size=6424751) 2024-11-15T08:26:54,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741920_1096 (size=6424751) 2024-11-15T08:26:54,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741921_1097 (size=1323991) 2024-11-15T08:26:54,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741921_1097 (size=1323991) 2024-11-15T08:26:54,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741921_1097 (size=1323991) 2024-11-15T08:26:54,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741922_1098 (size=903738) 2024-11-15T08:26:54,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741922_1098 (size=903738) 2024-11-15T08:26:54,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741922_1098 (size=903738) 2024-11-15T08:26:55,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741923_1099 (size=8360083) 2024-11-15T08:26:55,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741923_1099 (size=8360083) 2024-11-15T08:26:55,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741923_1099 (size=8360083) 2024-11-15T08:26:55,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741924_1100 (size=1877034) 2024-11-15T08:26:55,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741924_1100 (size=1877034) 2024-11-15T08:26:55,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741924_1100 (size=1877034) 2024-11-15T08:26:55,160 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T08:26:55,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741925_1101 (size=77835) 2024-11-15T08:26:55,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741925_1101 (size=77835) 2024-11-15T08:26:55,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741925_1101 (size=77835) 2024-11-15T08:26:55,226 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-15T08:26:55,226 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-15T08:26:55,227 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-15T08:26:55,227 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-15T08:26:55,228 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-15T08:26:55,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741926_1102 (size=30949) 2024-11-15T08:26:55,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741926_1102 (size=30949) 2024-11-15T08:26:55,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741926_1102 (size=30949) 2024-11-15T08:26:55,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741927_1103 (size=1597327) 2024-11-15T08:26:55,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741927_1103 (size=1597327) 2024-11-15T08:26:55,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741927_1103 (size=1597327) 2024-11-15T08:26:55,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741928_1104 (size=4695811) 2024-11-15T08:26:55,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741928_1104 (size=4695811) 2024-11-15T08:26:55,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741928_1104 (size=4695811) 2024-11-15T08:26:55,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741929_1105 (size=232957) 2024-11-15T08:26:55,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741929_1105 (size=232957) 2024-11-15T08:26:55,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741929_1105 (size=232957) 2024-11-15T08:26:55,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741930_1106 (size=127628) 2024-11-15T08:26:55,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741930_1106 (size=127628) 2024-11-15T08:26:55,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741930_1106 (size=127628) 2024-11-15T08:26:55,425 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741931_1107 (size=20406) 2024-11-15T08:26:55,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741931_1107 (size=20406) 2024-11-15T08:26:55,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741931_1107 (size=20406) 2024-11-15T08:26:55,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741932_1108 (size=5175431) 2024-11-15T08:26:55,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741932_1108 (size=5175431) 2024-11-15T08:26:55,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741932_1108 (size=5175431) 2024-11-15T08:26:55,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741933_1109 (size=217634) 2024-11-15T08:26:55,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741933_1109 (size=217634) 2024-11-15T08:26:55,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741933_1109 (size=217634) 2024-11-15T08:26:55,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741934_1110 (size=1832290) 2024-11-15T08:26:55,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741934_1110 (size=1832290) 2024-11-15T08:26:55,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741934_1110 (size=1832290) 2024-11-15T08:26:55,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741935_1111 (size=322274) 2024-11-15T08:26:55,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741935_1111 (size=322274) 2024-11-15T08:26:55,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741935_1111 (size=322274) 2024-11-15T08:26:55,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741936_1112 (size=503880) 2024-11-15T08:26:55,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741936_1112 (size=503880) 2024-11-15T08:26:55,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741936_1112 (size=503880) 2024-11-15T08:26:55,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741937_1113 (size=29229) 2024-11-15T08:26:55,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741937_1113 (size=29229) 2024-11-15T08:26:55,962 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741937_1113 (size=29229) 2024-11-15T08:26:55,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741938_1114 (size=440656) 2024-11-15T08:26:55,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741938_1114 (size=440656) 2024-11-15T08:26:55,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741938_1114 (size=440656) 2024-11-15T08:26:56,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741939_1115 (size=24096) 2024-11-15T08:26:56,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741939_1115 (size=24096) 2024-11-15T08:26:56,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741939_1115 (size=24096) 2024-11-15T08:26:56,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741940_1116 (size=111872) 2024-11-15T08:26:56,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741940_1116 (size=111872) 2024-11-15T08:26:56,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741940_1116 (size=111872) 2024-11-15T08:26:56,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741941_1117 (size=45609) 2024-11-15T08:26:56,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741941_1117 (size=45609) 2024-11-15T08:26:56,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741941_1117 (size=45609) 2024-11-15T08:26:56,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741942_1118 (size=136454) 2024-11-15T08:26:56,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741942_1118 (size=136454) 2024-11-15T08:26:56,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741942_1118 (size=136454) 2024-11-15T08:26:56,502 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
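[Editor's note] The JobResourceUploader warning at the end of this stretch ("No job jar file set. User classes may not be found.") is benign in this mini-cluster test, but in a regular client it is usually avoided by pointing the job at its own jar. A small illustrative snippet (class and job names hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class SetJobJarSketch {
      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "export-sketch");
        // Ship the jar containing the job's own classes so the
        // "User classes may not be found" warning does not apply.
        job.setJarByClass(SetJobJarSketch.class);
        // Alternatively: job.setJar("/path/to/job.jar");
      }
    }
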
2024-11-15T08:26:56,505 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-11-15T08:26:56,508 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-15T08:26:56,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741943_1119 (size=324) 2024-11-15T08:26:56,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741943_1119 (size=324) 2024-11-15T08:26:56,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741943_1119 (size=324) 2024-11-15T08:26:56,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741944_1120 (size=15) 2024-11-15T08:26:56,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741944_1120 (size=15) 2024-11-15T08:26:56,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741944_1120 (size=15) 2024-11-15T08:26:56,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741945_1121 (size=303728) 2024-11-15T08:26:56,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741945_1121 (size=303728) 2024-11-15T08:26:56,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741945_1121 (size=303728) 2024-11-15T08:26:56,573 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T08:26:56,573 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T08:26:56,782 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0002_000001 (auth:SIMPLE) from 127.0.0.1:57072 2024-11-15T08:26:59,443 INFO [master/48e8ad8dc936:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-15T08:26:59,443 INFO [master/48e8ad8dc936:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-15T08:27:00,731 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T08:27:03,482 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0002_000001 (auth:SIMPLE) from 127.0.0.1:50694 2024-11-15T08:27:03,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741946_1122 (size=349378) 2024-11-15T08:27:03,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741946_1122 (size=349378) 2024-11-15T08:27:03,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741946_1122 (size=349378) 2024-11-15T08:27:05,776 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0002_000001 (auth:SIMPLE) from 127.0.0.1:36282 2024-11-15T08:27:09,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741947_1123 (size=8190) 2024-11-15T08:27:09,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741947_1123 (size=8190) 2024-11-15T08:27:09,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741947_1123 (size=8190) 2024-11-15T08:27:09,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741948_1124 (size=5422) 2024-11-15T08:27:09,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741948_1124 (size=5422) 2024-11-15T08:27:09,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741948_1124 (size=5422) 2024-11-15T08:27:09,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741949_1125 (size=17398) 2024-11-15T08:27:09,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741949_1125 (size=17398) 2024-11-15T08:27:09,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741949_1125 (size=17398) 2024-11-15T08:27:09,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741950_1126 (size=461) 2024-11-15T08:27:09,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741950_1126 (size=461) 2024-11-15T08:27:09,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741950_1126 (size=461) 2024-11-15T08:27:09,782 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0002/container_1731659181416_0002_01_000002/launch_container.sh] 2024-11-15T08:27:09,782 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0002/container_1731659181416_0002_01_000002/container_tokens] 2024-11-15T08:27:09,782 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0002/container_1731659181416_0002_01_000002/sysfs] 2024-11-15T08:27:09,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741951_1127 (size=17398) 2024-11-15T08:27:09,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741951_1127 (size=17398) 2024-11-15T08:27:09,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741951_1127 (size=17398) 2024-11-15T08:27:09,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741952_1128 (size=349378) 2024-11-15T08:27:09,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741952_1128 (size=349378) 2024-11-15T08:27:09,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741952_1128 (size=349378) 2024-11-15T08:27:09,846 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0002_000001 (auth:SIMPLE) from 127.0.0.1:36284 2024-11-15T08:27:11,778 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-15T08:27:11,781 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
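[Editor's note] The export being finalized and verified here corresponds to running the ExportSnapshot tool against the 'snaptb-testExportWithResetTtl' snapshot; the MR job whose containers appear above does the actual file copying. A minimal sketch of an equivalent invocation follows; the destination URI is hypothetical, and the TTL-reset switch this particular test exercises is assumed to be the tool's reset-ttl option, which may vary by HBase version, so it is omitted from the code:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Copies the snapshot manifest and the hfiles it references
        // to another HDFS root, as seen in the log above.
        int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
            new String[] {
                "-snapshot", "snaptb-testExportWithResetTtl",
                "-copy-to", "hdfs://target-cluster:8020/hbase" // hypothetical destination
            });
        System.exit(rc);
      }
    }
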
2024-11-15T08:27:11,788 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb-testExportWithResetTtl 2024-11-15T08:27:11,788 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-15T08:27:11,789 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-15T08:27:11,789 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-15T08:27:11,789 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-15T08:27:11,789 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-15T08:27:11,789 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659213356/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659213356/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-15T08:27:11,790 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659213356/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-15T08:27:11,790 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659213356/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-15T08:27:11,797 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-11-15T08:27:11,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-11-15T08:27:11,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-15T08:27:11,802 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659231801"}]},"ts":"1731659231801"} 2024-11-15T08:27:11,804 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-15T08:27:11,804 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-11-15T08:27:11,805 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=45, ppid=44, 
state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-11-15T08:27:11,807 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=6347eed6bb6a47835827555fa53914aa, UNASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=e7d8881f335ac89ab35d99dc388ac927, UNASSIGN}] 2024-11-15T08:27:11,808 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=e7d8881f335ac89ab35d99dc388ac927, UNASSIGN 2024-11-15T08:27:11,808 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=6347eed6bb6a47835827555fa53914aa, UNASSIGN 2024-11-15T08:27:11,810 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=e7d8881f335ac89ab35d99dc388ac927, regionState=CLOSING, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:27:11,810 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=6347eed6bb6a47835827555fa53914aa, regionState=CLOSING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:27:11,812 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=6347eed6bb6a47835827555fa53914aa, UNASSIGN because future has completed 2024-11-15T08:27:11,812 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:27:11,812 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6347eed6bb6a47835827555fa53914aa, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:27:11,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=e7d8881f335ac89ab35d99dc388ac927, UNASSIGN because future has completed 2024-11-15T08:27:11,814 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:27:11,814 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=47, state=RUNNABLE, hasLock=false; CloseRegionProcedure e7d8881f335ac89ab35d99dc388ac927, server=48e8ad8dc936,34555,1731659175344}] 2024-11-15T08:27:11,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-15T08:27:11,967 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(122): Close 6347eed6bb6a47835827555fa53914aa 2024-11-15T08:27:11,967 DEBUG 
[RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:27:11,967 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1722): Closing 6347eed6bb6a47835827555fa53914aa, disabling compactions & flushes 2024-11-15T08:27:11,967 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. 2024-11-15T08:27:11,967 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. 2024-11-15T08:27:11,967 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. after waiting 0 ms 2024-11-15T08:27:11,967 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. 2024-11-15T08:27:11,967 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(122): Close e7d8881f335ac89ab35d99dc388ac927 2024-11-15T08:27:11,968 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:27:11,968 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1722): Closing e7d8881f335ac89ab35d99dc388ac927, disabling compactions & flushes 2024-11-15T08:27:11,968 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927. 2024-11-15T08:27:11,968 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927. 2024-11-15T08:27:11,968 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927. after waiting 0 ms 2024-11-15T08:27:11,968 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927. 
2024-11-15T08:27:11,973 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-15T08:27:11,973 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-15T08:27:11,974 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:27:11,974 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927. 2024-11-15T08:27:11,974 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1676): Region close journal for e7d8881f335ac89ab35d99dc388ac927: Waiting for close lock at 1731659231968Running coprocessor pre-close hooks at 1731659231968Disabling compacts and flushes for region at 1731659231968Disabling writes for close at 1731659231968Writing region close event to WAL at 1731659231969 (+1 ms)Running coprocessor post-close hooks at 1731659231973 (+4 ms)Closed at 1731659231974 (+1 ms) 2024-11-15T08:27:11,974 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:27:11,974 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa. 
2024-11-15T08:27:11,974 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] regionserver.HRegion(1676): Region close journal for 6347eed6bb6a47835827555fa53914aa: Waiting for close lock at 1731659231967Running coprocessor pre-close hooks at 1731659231967Disabling compacts and flushes for region at 1731659231967Disabling writes for close at 1731659231967Writing region close event to WAL at 1731659231968 (+1 ms)Running coprocessor post-close hooks at 1731659231974 (+6 ms)Closed at 1731659231974 2024-11-15T08:27:11,976 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(157): Closed e7d8881f335ac89ab35d99dc388ac927 2024-11-15T08:27:11,976 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=e7d8881f335ac89ab35d99dc388ac927, regionState=CLOSED 2024-11-15T08:27:11,977 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=48}] handler.UnassignRegionHandler(157): Closed 6347eed6bb6a47835827555fa53914aa 2024-11-15T08:27:11,977 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=6347eed6bb6a47835827555fa53914aa, regionState=CLOSED 2024-11-15T08:27:11,978 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=47, state=RUNNABLE, hasLock=false; CloseRegionProcedure e7d8881f335ac89ab35d99dc388ac927, server=48e8ad8dc936,34555,1731659175344 because future has completed 2024-11-15T08:27:11,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=46, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6347eed6bb6a47835827555fa53914aa, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:27:11,983 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=47 2024-11-15T08:27:11,983 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=47, state=SUCCESS, hasLock=false; CloseRegionProcedure e7d8881f335ac89ab35d99dc388ac927, server=48e8ad8dc936,34555,1731659175344 in 165 msec 2024-11-15T08:27:11,984 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=46 2024-11-15T08:27:11,984 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=46, state=SUCCESS, hasLock=false; CloseRegionProcedure 6347eed6bb6a47835827555fa53914aa, server=48e8ad8dc936,40637,1731659175269 in 169 msec 2024-11-15T08:27:11,985 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=e7d8881f335ac89ab35d99dc388ac927, UNASSIGN in 176 msec 2024-11-15T08:27:11,986 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=46, resume processing ppid=45 2024-11-15T08:27:11,986 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=6347eed6bb6a47835827555fa53914aa, UNASSIGN in 177 msec 2024-11-15T08:27:11,989 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=45, resume processing ppid=44 2024-11-15T08:27:11,989 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, ppid=44, 
state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 182 msec 2024-11-15T08:27:11,990 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659231990"}]},"ts":"1731659231990"} 2024-11-15T08:27:11,992 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-15T08:27:11,992 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-11-15T08:27:11,995 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 196 msec 2024-11-15T08:27:12,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-15T08:27:12,116 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-11-15T08:27:12,116 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-11-15T08:27:12,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-11-15T08:27:12,119 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=50, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-15T08:27:12,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-11-15T08:27:12,120 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=50, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-15T08:27:12,122 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-11-15T08:27:12,124 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa 2024-11-15T08:27:12,124 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927 2024-11-15T08:27:12,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-15T08:27:12,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-15T08:27:12,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-15T08:27:12,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-15T08:27:12,126 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927/recovered.edits] 2024-11-15T08:27:12,127 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa/recovered.edits] 2024-11-15T08:27:12,127 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-15T08:27:12,127 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-15T08:27:12,127 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-15T08:27:12,128 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-15T08:27:12,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-15T08:27:12,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-15T08:27:12,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:12,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:12,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-15T08:27:12,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:12,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-15T08:27:12,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:12,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-15T08:27:12,131 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T08:27:12,131 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T08:27:12,131 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T08:27:12,132 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-15T08:27:12,132 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927/cf/aa2698852cc642ad9431415b4b2ee5e8 to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927/cf/aa2698852cc642ad9431415b4b2ee5e8 2024-11-15T08:27:12,133 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa/cf/b220066fbb6a480f96a536970d28ba47 to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa/cf/b220066fbb6a480f96a536970d28ba47 2024-11-15T08:27:12,136 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927/recovered.edits/8.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927/recovered.edits/8.seqid 2024-11-15T08:27:12,137 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa/recovered.edits/8.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa/recovered.edits/8.seqid 2024-11-15T08:27:12,137 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/e7d8881f335ac89ab35d99dc388ac927 2024-11-15T08:27:12,137 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportWithResetTtl/6347eed6bb6a47835827555fa53914aa 2024-11-15T08:27:12,137 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-11-15T08:27:12,140 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=50, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-15T08:27:12,143 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-11-15T08:27:12,146 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-11-15T08:27:12,148 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=50, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-15T08:27:12,148 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 2024-11-15T08:27:12,149 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659232148"}]},"ts":"9223372036854775807"} 2024-11-15T08:27:12,149 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659232148"}]},"ts":"9223372036854775807"} 2024-11-15T08:27:12,153 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T08:27:12,153 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 6347eed6bb6a47835827555fa53914aa, NAME => 'testExportWithResetTtl,,1731659211957.6347eed6bb6a47835827555fa53914aa.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => e7d8881f335ac89ab35d99dc388ac927, NAME => 'testExportWithResetTtl,1,1731659211957.e7d8881f335ac89ab35d99dc388ac927.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T08:27:12,153 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 
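[Editor's note] The sequence recorded above (DisableTableProcedure, region close, HFileArchiver archiving, then DeleteTableProcedure removing the table from hbase:meta) is what the master runs when a client issues the corresponding Admin calls. A hedged client-side sketch, with connection details hypothetical:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testExportWithResetTtl");
          // Triggers DisableTableProcedure: regions close and the
          // table state in hbase:meta flips to DISABLED.
          admin.disableTable(table);
          // Triggers DeleteTableProcedure: region dirs are archived
          // and the table's rows are removed from hbase:meta.
          admin.deleteTable(table);
        }
      }
    }
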
2024-11-15T08:27:12,153 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731659232153"}]},"ts":"9223372036854775807"} 2024-11-15T08:27:12,157 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-11-15T08:27:12,158 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=50, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-15T08:27:12,159 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 42 msec 2024-11-15T08:27:12,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-15T08:27:12,236 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-11-15T08:27:12,236 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-11-15T08:27:12,236 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-11-15T08:27:12,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=51, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-11-15T08:27:12,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-15T08:27:12,240 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659232240"}]},"ts":"1731659232240"} 2024-11-15T08:27:12,242 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-15T08:27:12,242 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-11-15T08:27:12,242 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-11-15T08:27:12,244 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=215aac9a0193d4f77a31bd916345d889, UNASSIGN}, {pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c5fbc719b9109b686ccc0c7d4ece1343, UNASSIGN}] 2024-11-15T08:27:12,245 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c5fbc719b9109b686ccc0c7d4ece1343, UNASSIGN 2024-11-15T08:27:12,245 INFO [PEWorker-3 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=215aac9a0193d4f77a31bd916345d889, UNASSIGN 2024-11-15T08:27:12,246 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=54 updating hbase:meta row=c5fbc719b9109b686ccc0c7d4ece1343, regionState=CLOSING, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:27:12,246 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=53 updating hbase:meta row=215aac9a0193d4f77a31bd916345d889, regionState=CLOSING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:27:12,247 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=54, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c5fbc719b9109b686ccc0c7d4ece1343, UNASSIGN because future has completed 2024-11-15T08:27:12,248 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:27:12,248 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=55, ppid=54, state=RUNNABLE, hasLock=false; CloseRegionProcedure c5fbc719b9109b686ccc0c7d4ece1343, server=48e8ad8dc936,37511,1731659175104}] 2024-11-15T08:27:12,248 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=53, ppid=52, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=215aac9a0193d4f77a31bd916345d889, UNASSIGN because future has completed 2024-11-15T08:27:12,249 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:27:12,249 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=56, ppid=53, state=RUNNABLE, hasLock=false; CloseRegionProcedure 215aac9a0193d4f77a31bd916345d889, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:27:12,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-15T08:27:12,400 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(122): Close c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:27:12,401 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:27:12,401 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1722): Closing c5fbc719b9109b686ccc0c7d4ece1343, disabling compactions & flushes 2024-11-15T08:27:12,401 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. 2024-11-15T08:27:12,401 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. 
2024-11-15T08:27:12,401 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. after waiting 0 ms 2024-11-15T08:27:12,401 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. 2024-11-15T08:27:12,403 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(122): Close 215aac9a0193d4f77a31bd916345d889 2024-11-15T08:27:12,403 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:27:12,403 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1722): Closing 215aac9a0193d4f77a31bd916345d889, disabling compactions & flushes 2024-11-15T08:27:12,403 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. 2024-11-15T08:27:12,403 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. 2024-11-15T08:27:12,403 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. after waiting 0 ms 2024-11-15T08:27:12,403 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. 2024-11-15T08:27:12,408 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T08:27:12,409 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:27:12,409 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343. 
2024-11-15T08:27:12,409 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] regionserver.HRegion(1676): Region close journal for c5fbc719b9109b686ccc0c7d4ece1343: Waiting for close lock at 1731659232401Running coprocessor pre-close hooks at 1731659232401Disabling compacts and flushes for region at 1731659232401Disabling writes for close at 1731659232401Writing region close event to WAL at 1731659232402 (+1 ms)Running coprocessor post-close hooks at 1731659232409 (+7 ms)Closed at 1731659232409 2024-11-15T08:27:12,409 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T08:27:12,410 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:27:12,410 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889. 2024-11-15T08:27:12,410 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] regionserver.HRegion(1676): Region close journal for 215aac9a0193d4f77a31bd916345d889: Waiting for close lock at 1731659232403Running coprocessor pre-close hooks at 1731659232403Disabling compacts and flushes for region at 1731659232403Disabling writes for close at 1731659232403Writing region close event to WAL at 1731659232404 (+1 ms)Running coprocessor post-close hooks at 1731659232410 (+6 ms)Closed at 1731659232410 2024-11-15T08:27:12,412 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=55}] handler.UnassignRegionHandler(157): Closed c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:27:12,413 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=54 updating hbase:meta row=c5fbc719b9109b686ccc0c7d4ece1343, regionState=CLOSED 2024-11-15T08:27:12,414 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=56}] handler.UnassignRegionHandler(157): Closed 215aac9a0193d4f77a31bd916345d889 2024-11-15T08:27:12,415 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=53 updating hbase:meta row=215aac9a0193d4f77a31bd916345d889, regionState=CLOSED 2024-11-15T08:27:12,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=55, ppid=54, state=RUNNABLE, hasLock=false; CloseRegionProcedure c5fbc719b9109b686ccc0c7d4ece1343, server=48e8ad8dc936,37511,1731659175104 because future has completed 2024-11-15T08:27:12,418 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=56, ppid=53, state=RUNNABLE, hasLock=false; CloseRegionProcedure 215aac9a0193d4f77a31bd916345d889, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:27:12,423 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=54 2024-11-15T08:27:12,423 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=54, state=SUCCESS, hasLock=false; CloseRegionProcedure 
c5fbc719b9109b686ccc0c7d4ece1343, server=48e8ad8dc936,37511,1731659175104 in 169 msec 2024-11-15T08:27:12,425 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=56, resume processing ppid=53 2024-11-15T08:27:12,426 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=52, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c5fbc719b9109b686ccc0c7d4ece1343, UNASSIGN in 179 msec 2024-11-15T08:27:12,426 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, ppid=53, state=SUCCESS, hasLock=false; CloseRegionProcedure 215aac9a0193d4f77a31bd916345d889, server=48e8ad8dc936,40637,1731659175269 in 172 msec 2024-11-15T08:27:12,428 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=53, resume processing ppid=52 2024-11-15T08:27:12,428 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, ppid=52, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=215aac9a0193d4f77a31bd916345d889, UNASSIGN in 182 msec 2024-11-15T08:27:12,431 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=52, resume processing ppid=51 2024-11-15T08:27:12,431 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=51, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 187 msec 2024-11-15T08:27:12,432 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659232432"}]},"ts":"1731659232432"} 2024-11-15T08:27:12,434 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-15T08:27:12,434 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-11-15T08:27:12,437 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 198 msec 2024-11-15T08:27:12,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=51 2024-11-15T08:27:12,557 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-15T08:27:12,557 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-11-15T08:27:12,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=57, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-15T08:27:12,559 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=57, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-15T08:27:12,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-11-15T08:27:12,560 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=57, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-15T08:27:12,563 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-11-15T08:27:12,564 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889 2024-11-15T08:27:12,565 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:27:12,567 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889/recovered.edits] 2024-11-15T08:27:12,567 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343/recovered.edits] 2024-11-15T08:27:12,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-15T08:27:12,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-15T08:27:12,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-15T08:27:12,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-15T08:27:12,570 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-15T08:27:12,570 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-15T08:27:12,570 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-15T08:27:12,570 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportWithResetTtl with data PBUF 2024-11-15T08:27:12,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-15T08:27:12,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-15T08:27:12,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:12,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:12,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-15T08:27:12,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:12,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-15T08:27:12,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:12,573 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343/cf/2828c90a0da94a5593abb644470cdeee to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343/cf/2828c90a0da94a5593abb644470cdeee 2024-11-15T08:27:12,573 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889/cf/eaa3389297a74605a8d179a2ad503e1e to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889/cf/eaa3389297a74605a8d179a2ad503e1e 2024-11-15T08:27:12,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=57 2024-11-15T08:27:12,578 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889/recovered.edits/9.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889/recovered.edits/9.seqid 2024-11-15T08:27:12,579 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/215aac9a0193d4f77a31bd916345d889 2024-11-15T08:27:12,580 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343/recovered.edits/9.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343/recovered.edits/9.seqid 2024-11-15T08:27:12,580 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithResetTtl/c5fbc719b9109b686ccc0c7d4ece1343 2024-11-15T08:27:12,580 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-11-15T08:27:12,584 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=57, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-15T08:27:12,588 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-11-15T08:27:12,594 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-11-15T08:27:12,596 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=57, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-15T08:27:12,597 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 
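The records above and just below trace the DISABLE and DELETE of testtb-testExportWithResetTtl (region close, HFile archiving, META cleanup), followed by deletion of its three snapshots. For reference, a minimal client-side sketch of the same teardown through the public HBase Admin API; this is an illustration only, not the test's own utility code, and it assumes a Configuration whose hbase-site.xml points at this mini cluster (the class name DropExportTestTable is invented for the example):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropExportTestTable {
      public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml on the classpath points at the cluster under test.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
          if (admin.tableExists(table)) {
            if (admin.isTableEnabled(table)) {
              admin.disableTable(table);   // drives a DisableTableProcedure like pid=51 above
            }
            admin.deleteTable(table);      // drives a DeleteTableProcedure (archive + META cleanup)
          }
          // Drop the snapshots the test created against this table.
          admin.deleteSnapshot("emptySnaptb0-testExportWithResetTtl");
          admin.deleteSnapshot("snaptb-testExportWithResetTtl");
          admin.deleteSnapshot("snaptb0-testExportWithResetTtl");
        }
      }
    }

Both disableTable and deleteTable are synchronous on the client side, which is why the log shows the RPC handler repeatedly "Checking to see if procedure is done" until the corresponding procedure finishes.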
2024-11-15T08:27:12,597 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659232597"}]},"ts":"9223372036854775807"} 2024-11-15T08:27:12,597 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659232597"}]},"ts":"9223372036854775807"} 2024-11-15T08:27:12,600 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T08:27:12,600 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 215aac9a0193d4f77a31bd916345d889, NAME => 'testtb-testExportWithResetTtl,,1731659209940.215aac9a0193d4f77a31bd916345d889.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => c5fbc719b9109b686ccc0c7d4ece1343, NAME => 'testtb-testExportWithResetTtl,1,1731659209940.c5fbc719b9109b686ccc0c7d4ece1343.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T08:27:12,600 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-11-15T08:27:12,600 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731659232600"}]},"ts":"9223372036854775807"} 2024-11-15T08:27:12,603 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-11-15T08:27:12,604 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=57, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-15T08:27:12,606 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 47 msec 2024-11-15T08:27:12,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=57 2024-11-15T08:27:12,686 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-11-15T08:27:12,686 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-15T08:27:12,695 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-11-15T08:27:12,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-11-15T08:27:12,699 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-11-15T08:27:12,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-11-15T08:27:12,702 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: 
"snaptb0-testExportWithResetTtl" type: DISABLED 2024-11-15T08:27:12,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-11-15T08:27:12,736 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=786 (was 771) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1954 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45913 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-676941629_1 at /127.0.0.1:40962 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36871 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41323 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2123625332_22 at /127.0.0.1:58620 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2123625332_22 at /127.0.0.1:59130 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2123625332_22 at /127.0.0.1:38188 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-676941629_1 at /127.0.0.1:42860 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 7151) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:36871 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=803 (was 802) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=413 (was 379) - SystemLoadAverage LEAK? 
-, ProcessCount=19 (was 19), AvailableMemoryMB=1200 (was 1679) 2024-11-15T08:27:12,736 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=786 is superior to 500 2024-11-15T08:27:12,758 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=786, OpenFileDescriptor=803, MaxFileDescriptor=1048576, SystemLoadAverage=413, ProcessCount=19, AvailableMemoryMB=1199 2024-11-15T08:27:12,758 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=786 is superior to 500 2024-11-15T08:27:12,760 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T08:27:12,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=58, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-11-15T08:27:12,763 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T08:27:12,763 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:27:12,763 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 58 2024-11-15T08:27:12,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-15T08:27:12,765 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T08:27:12,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741953_1129 (size=407) 2024-11-15T08:27:12,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741953_1129 (size=407) 2024-11-15T08:27:12,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741953_1129 (size=407) 2024-11-15T08:27:12,777 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => be44a8fd2fcf207d87fc3ae62ea17bfc, NAME => 'testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:27:12,777 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 93ed4a685de0c7b0ec91c94ddfaf92e9, NAME => 'testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:27:12,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741955_1131 (size=68) 2024-11-15T08:27:12,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741955_1131 (size=68) 2024-11-15T08:27:12,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741955_1131 (size=68) 2024-11-15T08:27:12,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741954_1130 (size=68) 2024-11-15T08:27:12,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741954_1130 (size=68) 2024-11-15T08:27:12,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741954_1130 (size=68) 2024-11-15T08:27:12,800 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:27:12,800 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing be44a8fd2fcf207d87fc3ae62ea17bfc, disabling compactions & flushes 2024-11-15T08:27:12,800 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. 2024-11-15T08:27:12,800 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. 2024-11-15T08:27:12,800 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. 
after waiting 0 ms 2024-11-15T08:27:12,800 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. 2024-11-15T08:27:12,800 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. 2024-11-15T08:27:12,800 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for be44a8fd2fcf207d87fc3ae62ea17bfc: Waiting for close lock at 1731659232800Disabling compacts and flushes for region at 1731659232800Disabling writes for close at 1731659232800Writing region close event to WAL at 1731659232800Closed at 1731659232800 2024-11-15T08:27:12,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-15T08:27:13,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-15T08:27:13,200 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:27:13,200 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 93ed4a685de0c7b0ec91c94ddfaf92e9, disabling compactions & flushes 2024-11-15T08:27:13,200 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. 2024-11-15T08:27:13,200 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. 2024-11-15T08:27:13,200 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. after waiting 0 ms 2024-11-15T08:27:13,200 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. 2024-11-15T08:27:13,200 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. 
2024-11-15T08:27:13,200 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 93ed4a685de0c7b0ec91c94ddfaf92e9: Waiting for close lock at 1731659233200Disabling compacts and flushes for region at 1731659233200Disabling writes for close at 1731659233200Writing region close event to WAL at 1731659233200Closed at 1731659233200 2024-11-15T08:27:13,202 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T08:27:13,203 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731659233202"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659233202"}]},"ts":"1731659233202"} 2024-11-15T08:27:13,203 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731659233202"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659233202"}]},"ts":"1731659233202"} 2024-11-15T08:27:13,206 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T08:27:13,207 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T08:27:13,208 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659233208"}]},"ts":"1731659233208"} 2024-11-15T08:27:13,211 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-11-15T08:27:13,211 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {48e8ad8dc936=0} racks are {/default-rack=0} 2024-11-15T08:27:13,213 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T08:27:13,213 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T08:27:13,213 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T08:27:13,213 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T08:27:13,213 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T08:27:13,213 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T08:27:13,213 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T08:27:13,213 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T08:27:13,213 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T08:27:13,213 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T08:27:13,214 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=be44a8fd2fcf207d87fc3ae62ea17bfc, ASSIGN}, {pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=93ed4a685de0c7b0ec91c94ddfaf92e9, ASSIGN}] 2024-11-15T08:27:13,215 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=be44a8fd2fcf207d87fc3ae62ea17bfc, ASSIGN 2024-11-15T08:27:13,216 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=93ed4a685de0c7b0ec91c94ddfaf92e9, ASSIGN 2024-11-15T08:27:13,218 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=be44a8fd2fcf207d87fc3ae62ea17bfc, ASSIGN; state=OFFLINE, location=48e8ad8dc936,37511,1731659175104; forceNewPlan=false, retain=false 2024-11-15T08:27:13,218 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=93ed4a685de0c7b0ec91c94ddfaf92e9, ASSIGN; state=OFFLINE, location=48e8ad8dc936,34555,1731659175344; forceNewPlan=false, retain=false 2024-11-15T08:27:13,369 INFO [48e8ad8dc936:35169 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
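The create request logged at 08:27:12,760 carries the full descriptor for testtb-testExportFileSystemState: REGION_REPLICATION => '1' and a single 'cf' family with VERSIONS => '1', and the two regions initialized above (start keys '' and '1') imply a single split point at '1'. A minimal sketch of submitting an equivalent request through the public Admin API follows; it is illustrative only (the class name CreateExportTestTable is invented, and the Configuration is assumed to point at this cluster), not the helper the test harness actually uses:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateExportTestTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
              .setRegionReplication(1)                        // REGION_REPLICATION => '1'
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)                          // VERSIONS => '1'
                  .build())
              .build();
          // One split point at '1' yields the two regions ('' -> '1', '1' -> '') seen in the log.
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(desc, splitKeys);                 // blocks until the CreateTableProcedure completes
        }
      }
    }

On the master side this corresponds to the CreateTableProcedure states logged above (PRE_OPERATION, WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS), with the balancer then assigning the two new regions to region servers.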
2024-11-15T08:27:13,369 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=60 updating hbase:meta row=93ed4a685de0c7b0ec91c94ddfaf92e9, regionState=OPENING, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:27:13,369 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=be44a8fd2fcf207d87fc3ae62ea17bfc, regionState=OPENING, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:27:13,372 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=be44a8fd2fcf207d87fc3ae62ea17bfc, ASSIGN because future has completed 2024-11-15T08:27:13,372 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE, hasLock=false; OpenRegionProcedure be44a8fd2fcf207d87fc3ae62ea17bfc, server=48e8ad8dc936,37511,1731659175104}] 2024-11-15T08:27:13,373 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=58, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=93ed4a685de0c7b0ec91c94ddfaf92e9, ASSIGN because future has completed 2024-11-15T08:27:13,373 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=62, ppid=60, state=RUNNABLE, hasLock=false; OpenRegionProcedure 93ed4a685de0c7b0ec91c94ddfaf92e9, server=48e8ad8dc936,34555,1731659175344}] 2024-11-15T08:27:13,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-15T08:27:13,476 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T08:27:13,528 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. 2024-11-15T08:27:13,529 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7752): Opening region: {ENCODED => be44a8fd2fcf207d87fc3ae62ea17bfc, NAME => 'testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T08:27:13,529 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. service=AccessControlService 2024-11-15T08:27:13,529 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T08:27:13,530 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:13,530 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:27:13,530 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7794): checking encryption for be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:13,530 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(7797): checking classloading for be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:13,532 INFO [StoreOpener-be44a8fd2fcf207d87fc3ae62ea17bfc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:13,533 INFO [StoreOpener-be44a8fd2fcf207d87fc3ae62ea17bfc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region be44a8fd2fcf207d87fc3ae62ea17bfc columnFamilyName cf 2024-11-15T08:27:13,534 DEBUG [StoreOpener-be44a8fd2fcf207d87fc3ae62ea17bfc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:27:13,535 INFO [StoreOpener-be44a8fd2fcf207d87fc3ae62ea17bfc-1 {}] regionserver.HStore(327): Store=be44a8fd2fcf207d87fc3ae62ea17bfc/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:27:13,535 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1038): replaying wal for be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:13,535 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. 
2024-11-15T08:27:13,535 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7752): Opening region: {ENCODED => 93ed4a685de0c7b0ec91c94ddfaf92e9, NAME => 'testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T08:27:13,535 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. service=AccessControlService 2024-11-15T08:27:13,535 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:13,536 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T08:27:13,536 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:13,536 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:13,536 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:27:13,536 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7794): checking encryption for 93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:13,536 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(7797): checking classloading for 93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:13,536 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1048): stopping wal replay for be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:13,537 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1060): Cleaning up temporary data for be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:13,538 INFO [StoreOpener-93ed4a685de0c7b0ec91c94ddfaf92e9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:13,539 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1093): writing seq id for 
be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:13,540 INFO [StoreOpener-93ed4a685de0c7b0ec91c94ddfaf92e9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 93ed4a685de0c7b0ec91c94ddfaf92e9 columnFamilyName cf 2024-11-15T08:27:13,540 DEBUG [StoreOpener-93ed4a685de0c7b0ec91c94ddfaf92e9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:27:13,541 INFO [StoreOpener-93ed4a685de0c7b0ec91c94ddfaf92e9-1 {}] regionserver.HStore(327): Store=93ed4a685de0c7b0ec91c94ddfaf92e9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:27:13,541 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1038): replaying wal for 93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:13,542 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:13,543 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:13,543 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:27:13,543 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1048): stopping wal replay for 93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:13,543 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1060): Cleaning up temporary data for 93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:13,544 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1114): Opened be44a8fd2fcf207d87fc3ae62ea17bfc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58862059, jitterRate=-0.12288697063922882}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:27:13,544 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, 
pid=61}] regionserver.HRegion(1122): Running coprocessor post-open hooks for be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:13,544 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegion(1006): Region open journal for be44a8fd2fcf207d87fc3ae62ea17bfc: Running coprocessor pre-open hook at 1731659233530Writing region info on filesystem at 1731659233530Initializing all the Stores at 1731659233531 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659233531Cleaning up temporary data from old regions at 1731659233537 (+6 ms)Running coprocessor post-open hooks at 1731659233544 (+7 ms)Region opened successfully at 1731659233544 2024-11-15T08:27:13,545 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1093): writing seq id for 93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:13,545 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc., pid=61, masterSystemTime=1731659233524 2024-11-15T08:27:13,547 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:27:13,547 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1114): Opened 93ed4a685de0c7b0ec91c94ddfaf92e9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64800688, jitterRate=-0.03439450263977051}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:27:13,547 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:13,547 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. 
2024-11-15T08:27:13,548 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegion(1006): Region open journal for 93ed4a685de0c7b0ec91c94ddfaf92e9: Running coprocessor pre-open hook at 1731659233536Writing region info on filesystem at 1731659233536Initializing all the Stores at 1731659233537 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659233538 (+1 ms)Cleaning up temporary data from old regions at 1731659233543 (+5 ms)Running coprocessor post-open hooks at 1731659233547 (+4 ms)Region opened successfully at 1731659233547 2024-11-15T08:27:13,548 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=61}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. 2024-11-15T08:27:13,548 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=be44a8fd2fcf207d87fc3ae62ea17bfc, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:27:13,548 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9., pid=62, masterSystemTime=1731659233527 2024-11-15T08:27:13,550 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. 2024-11-15T08:27:13,550 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=62}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. 
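[editor's note] The meta updates above (pid=59 → regionLocation=48e8ad8dc936,37511,… and pid=60 → regionLocation=48e8ad8dc936,34555,…) record which region server ended up hosting each of the two regions. For illustration only, a client can read the same region-to-server mapping back through the public RegionLocator API; this is a hedged sketch, not part of the test — the connection configuration is assumed to come from an hbase-site.xml on the classpath.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionLocations {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // assumes hbase-site.xml on the classpath
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(table)) {
      // One HRegionLocation per region: encoded region name plus the hosting server,
      // i.e. the same mapping the ASSIGN procedures wrote into hbase:meta above.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
```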
2024-11-15T08:27:13,550 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=59, state=RUNNABLE, hasLock=false; OpenRegionProcedure be44a8fd2fcf207d87fc3ae62ea17bfc, server=48e8ad8dc936,37511,1731659175104 because future has completed 2024-11-15T08:27:13,551 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=60 updating hbase:meta row=93ed4a685de0c7b0ec91c94ddfaf92e9, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:27:13,553 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=62, ppid=60, state=RUNNABLE, hasLock=false; OpenRegionProcedure 93ed4a685de0c7b0ec91c94ddfaf92e9, server=48e8ad8dc936,34555,1731659175344 because future has completed 2024-11-15T08:27:13,554 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=59 2024-11-15T08:27:13,555 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=59, state=SUCCESS, hasLock=false; OpenRegionProcedure be44a8fd2fcf207d87fc3ae62ea17bfc, server=48e8ad8dc936,37511,1731659175104 in 180 msec 2024-11-15T08:27:13,556 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=62, resume processing ppid=60 2024-11-15T08:27:13,556 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, ppid=60, state=SUCCESS, hasLock=false; OpenRegionProcedure 93ed4a685de0c7b0ec91c94ddfaf92e9, server=48e8ad8dc936,34555,1731659175344 in 181 msec 2024-11-15T08:27:13,556 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=58, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=be44a8fd2fcf207d87fc3ae62ea17bfc, ASSIGN in 341 msec 2024-11-15T08:27:13,558 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=58 2024-11-15T08:27:13,558 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=58, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=93ed4a685de0c7b0ec91c94ddfaf92e9, ASSIGN in 342 msec 2024-11-15T08:27:13,558 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T08:27:13,559 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659233559"}]},"ts":"1731659233559"} 2024-11-15T08:27:13,560 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-11-15T08:27:13,561 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=58, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T08:27:13,562 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-11-15T08:27:13,565 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 
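[editor's note] The CREATE_TABLE_POST_OPERATION step above writes the table owner's permission record (jenkins: RWXCA) into hbase:acl, and the priority handler on port 40637 reads the entry straight back. As a side illustration, the same ACL entries can be inspected from a client with the AccessControlClient helper; this is only a sketch assuming the AccessController coprocessor is enabled as in this test cluster, and the table-name regex below is simply the literal table name.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ShowTableAcls {
  // AccessControlClient methods declare Throwable, hence the broad throws clause.
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();  // assumed cluster configuration
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Lists the entries stored under the hbase:acl row for this table,
      // e.g. the "jenkins: RWXCA" record written by the create-table post operation above.
      for (UserPermission perm :
          AccessControlClient.getUserPermissions(conn, "testtb-testExportFileSystemState")) {
        System.out.println(perm);
      }
    }
  }
}
```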
2024-11-15T08:27:13,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:13,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:13,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:13,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:13,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-15T08:27:13,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-15T08:27:13,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-15T08:27:13,569 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-15T08:27:13,571 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 809 msec 2024-11-15T08:27:13,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=58 2024-11-15T08:27:13,907 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-15T08:27:13,907 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-11-15T08:27:13,907 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:27:13,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 2024-11-15T08:27:13,911 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:27:13,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemState assigned. 
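[editor's note] Procedure 58 above is the server side of an ordinary createTable request: a table with a single column family cf (VERSIONS => '1' in the descriptor logged further up) pre-split at row key '1', which is why exactly two regions exist — be44a8fd… with end key '1' and 93ed4a68… with start key '1'. A client-side sketch of an equivalent request follows; it is an approximation using the standard Admin API, not the test's actual code, and the connection setup is assumed.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePreSplitTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Single family 'cf' keeping one version, matching the descriptor in the open journal above.
      TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build());
      // One split key '1' yields two regions: ('', '1') and ('1', '').
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(builder.build(), splitKeys);
      // The synchronous createTable call returns once the CreateTableProcedure completes,
      // at which point the table is ENABLED and its regions are assigned as logged above.
      System.out.println("table available: " + admin.isTableAvailable(table));
    }
  }
}
```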
2024-11-15T08:27:13,911 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T08:27:13,914 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-15T08:27:13,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659233914 (current time:1731659233914). 2024-11-15T08:27:13,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:27:13,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-15T08:27:13,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:27:13,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1919fd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:13,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:27:13,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:27:13,916 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:27:13,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:27:13,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:27:13,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@515dcedf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:13,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:27:13,917 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:27:13,917 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:13,918 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:52018, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:27:13,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d48209d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:13,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:27:13,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:27:13,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:27:13,921 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40864, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:27:13,922 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 2024-11-15T08:27:13,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:27:13,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:13,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:13,922 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T08:27:13,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f2e7072, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:13,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:27:13,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:27:13,924 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:27:13,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:27:13,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:27:13,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6893904a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:13,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:27:13,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:27:13,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:13,926 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52028, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:27:13,926 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66ed37cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:13,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:27:13,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:27:13,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:27:13,929 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40880, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T08:27:13,931 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:27:13,932 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 2024-11-15T08:27:13,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:27:13,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:13,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:13,932 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:27:13,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-15T08:27:13,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T08:27:13,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-15T08:27:13,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 63 2024-11-15T08:27:13,935 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:27:13,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-15T08:27:13,936 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:27:13,938 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:27:13,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741956_1132 (size=170) 2024-11-15T08:27:13,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741956_1132 (size=170) 2024-11-15T08:27:13,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741956_1132 (size=170) 2024-11-15T08:27:13,949 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:27:13,949 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure be44a8fd2fcf207d87fc3ae62ea17bfc}, {pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 93ed4a685de0c7b0ec91c94ddfaf92e9}] 
2024-11-15T08:27:13,950 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:13,950 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:14,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-15T08:27:14,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=64 2024-11-15T08:27:14,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34555 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=65 2024-11-15T08:27:14,102 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. 2024-11-15T08:27:14,102 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. 2024-11-15T08:27:14,102 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.HRegion(2603): Flush status journal for 93ed4a685de0c7b0ec91c94ddfaf92e9: 2024-11-15T08:27:14,102 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.HRegion(2603): Flush status journal for be44a8fd2fcf207d87fc3ae62ea17bfc: 2024-11-15T08:27:14,102 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. for emptySnaptb0-testExportFileSystemState completed. 2024-11-15T08:27:14,102 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. for emptySnaptb0-testExportFileSystemState completed. 2024-11-15T08:27:14,103 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-15T08:27:14,103 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:27:14,103 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-15T08:27:14,103 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T08:27:14,103 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:27:14,103 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T08:27:14,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741957_1133 (size=71) 2024-11-15T08:27:14,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741957_1133 (size=71) 2024-11-15T08:27:14,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741957_1133 (size=71) 2024-11-15T08:27:14,111 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. 2024-11-15T08:27:14,111 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=64}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=64 2024-11-15T08:27:14,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=64 2024-11-15T08:27:14,112 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:14,112 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:14,114 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; SnapshotRegionProcedure be44a8fd2fcf207d87fc3ae62ea17bfc in 164 msec 2024-11-15T08:27:14,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741958_1134 (size=71) 2024-11-15T08:27:14,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741958_1134 (size=71) 2024-11-15T08:27:14,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741958_1134 (size=71) 2024-11-15T08:27:14,118 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. 
2024-11-15T08:27:14,118 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=65}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=65 2024-11-15T08:27:14,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=65 2024-11-15T08:27:14,119 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:14,119 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:14,122 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=65, resume processing ppid=63 2024-11-15T08:27:14,122 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 93ed4a685de0c7b0ec91c94ddfaf92e9 in 171 msec 2024-11-15T08:27:14,122 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:27:14,123 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:27:14,124 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:27:14,124 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-11-15T08:27:14,125 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-11-15T08:27:14,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741959_1135 (size=552) 2024-11-15T08:27:14,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741959_1135 (size=552) 2024-11-15T08:27:14,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741959_1135 (size=552) 2024-11-15T08:27:14,139 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 
2024-11-15T08:27:14,144 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:27:14,144 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-11-15T08:27:14,145 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=63, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:27:14,146 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 63 2024-11-15T08:27:14,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=63, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 213 msec 2024-11-15T08:27:14,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-15T08:27:14,257 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-15T08:27:14,261 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='0912f14e33e61a7358ddfb5c0190aa11b', locateType=CURRENT is [region=testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:27:14,262 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='1e4aa1462b018ff580b4b878fee087f73', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9., hostname=48e8ad8dc936,34555,1731659175344, seqNum=2] 2024-11-15T08:27:14,263 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='237f5dfd3e9059fc4ff634aec5b2208c2', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9., hostname=48e8ad8dc936,34555,1731659175344, seqNum=2] 2024-11-15T08:27:14,264 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='3ba966b99491cb005bc968c759f6b446b', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9., hostname=48e8ad8dc936,34555,1731659175344, seqNum=2] 
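[editor's note] Snapshot procedure 63 above was driven by a client snapshot request of type FLUSH against the still-empty table, and completed once the manifest was moved out of .hbase-snapshot/.tmp. A hedged sketch of how such a request is typically issued and verified through the Admin API is shown below; the snapshot and table names come from the log, everything else (connection setup, verification pattern) is an assumption.

```java
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class TakeEmptySnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Synchronous snapshot; for an enabled table this takes the FLUSH-type snapshot
      // seen in the request above and returns once the SnapshotProcedure finishes.
      admin.snapshot("emptySnaptb0-testExportFileSystemState", table);

      // Confirm the snapshot is now listed by the master.
      for (SnapshotDescription sd : admin.listSnapshots(Pattern.compile("emptySnaptb0-.*"))) {
        System.out.println(sd.getName() + " on " + sd.getTableName());
      }
    }
  }
}
```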
2024-11-15T08:27:14,265 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='42c3ca6e6b1130f5d1ef2b297b423c50e', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9., hostname=48e8ad8dc936,34555,1731659175344, seqNum=2] 2024-11-15T08:27:14,266 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='54de0fcd7854c89b00a0e29db19b96555', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9., hostname=48e8ad8dc936,34555,1731659175344, seqNum=2] 2024-11-15T08:27:14,266 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='650c92d368c304c4ba2e4724c1304e435', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9., hostname=48e8ad8dc936,34555,1731659175344, seqNum=2] 2024-11-15T08:27:14,267 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37511 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:27:14,270 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34555 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:27:14,272 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T08:27:14,274 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-15T08:27:14,274 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. 
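[editor's note] The two "writing data to region … with WAL disabled" warnings above correspond to mutations that skip the write-ahead log, which is why the region server notes the data could be lost on a crash. A minimal client-side sketch of a put with that durability setting follows; the row key, qualifier, and value are hypothetical placeholders, not values from this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("testtb-testExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tableName)) {
      Put put = new Put(Bytes.toBytes("row-0"));  // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skip the WAL for this mutation; the region server then emits the
      // "with WAL disabled. Data may be lost in the event of a crash." warning seen above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```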
2024-11-15T08:27:14,274 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:27:14,276 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T08:27:14,280 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T08:27:14,286 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T08:27:14,288 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-15T08:27:14,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659234288 (current time:1731659234288). 2024-11-15T08:27:14,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:27:14,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-15T08:27:14,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:27:14,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd596d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:14,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:27:14,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:27:14,290 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:27:14,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:27:14,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:27:14,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1128a6c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-15T08:27:14,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:27:14,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:27:14,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:14,291 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52056, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:27:14,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@570dbdcc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:14,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:27:14,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:27:14,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:27:14,294 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40890, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:27:14,295 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 
2024-11-15T08:27:14,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-15T08:27:14,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-15T08:27:14,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-15T08:27:14,295 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-15T08:27:14,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a686512, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-15T08:27:14,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id
2024-11-15T08:27:14,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-11-15T08:27:14,297 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35'
2024-11-15T08:27:14,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-11-15T08:27:14,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35"
2024-11-15T08:27:14,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51c3b64c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-11-15T08:27:14,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to
use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:27:14,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:27:14,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:14,298 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52076, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:27:14,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b00db14, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:14,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:27:14,300 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:27:14,300 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:27:14,301 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40900, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:27:14,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:27:14,303 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 
2024-11-15T08:27:14,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-11-15T08:27:14,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-15T08:27:14,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-15T08:27:14,304 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-11-15T08:27:14,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA]
2024-11-15T08:27:14,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot...
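The entries above show the master receiving a FLUSH-type snapshot request for testtb-testExportFileSystemState, validating it (creation time, TTL, version, owner, ACLs) and accepting it. For reference, a minimal client-side sketch of issuing the same kind of request through the HBase Admin API follows; the snapshot and table names mirror the log, while the class name and configuration source are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master's SnapshotProcedure finishes; FLUSH triggers the
          // per-region memstore flushes that appear later in this log.
          admin.snapshot("snaptb0-testExportFileSystemState",
              TableName.valueOf("testtb-testExportFileSystemState"),
              SnapshotType.FLUSH);
        }
      }
    }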
2024-11-15T08:27:14,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=66, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-15T08:27:14,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 66 2024-11-15T08:27:14,306 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:27:14,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-15T08:27:14,307 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:27:14,309 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:27:14,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741960_1136 (size=165) 2024-11-15T08:27:14,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741960_1136 (size=165) 2024-11-15T08:27:14,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741960_1136 (size=165) 2024-11-15T08:27:14,318 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:27:14,318 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure be44a8fd2fcf207d87fc3ae62ea17bfc}, {pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 93ed4a685de0c7b0ec91c94ddfaf92e9}] 2024-11-15T08:27:14,319 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:14,319 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:14,416 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-15T08:27:14,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=67 2024-11-15T08:27:14,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34555 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=68 2024-11-15T08:27:14,471 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. 2024-11-15T08:27:14,471 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. 2024-11-15T08:27:14,472 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(2902): Flushing be44a8fd2fcf207d87fc3ae62ea17bfc 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-15T08:27:14,472 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(2902): Flushing 93ed4a685de0c7b0ec91c94ddfaf92e9 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-15T08:27:14,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc/.tmp/cf/801de5da2e8548ffa60d6d60e8560e08 is 71, key is 02a64f1c278bd6a71305f493a8cd6f1a/cf:q/1731659234267/Put/seqid=0 2024-11-15T08:27:14,496 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9/.tmp/cf/0d33b8c0467845068f460b82137eeed1 is 71, key is 1e647e53764329f5393a4f98229179d2/cf:q/1731659234270/Put/seqid=0 2024-11-15T08:27:14,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741962_1138 (size=8258) 2024-11-15T08:27:14,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741961_1137 (size=5354) 2024-11-15T08:27:14,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741962_1138 (size=8258) 2024-11-15T08:27:14,507 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc/.tmp/cf/801de5da2e8548ffa60d6d60e8560e08 
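The flush entries above persist the cells the test wrote to column family cf, qualifier q, in the two regions before they are snapshotted. Purely for orientation, a hypothetical sketch of how such cells are written through the client API follows; only the table, family, and qualifier names come from the log, the row key and value are made up.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteSampleCell {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemState"))) {
          // One cell in cf:q, the same family/qualifier seen in the flushed HFile keys above.
          Put put = new Put(Bytes.toBytes("row-0001")); // illustrative row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0001"));
          table.put(put);
        }
      }
    }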
2024-11-15T08:27:14,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741961_1137 (size=5354) 2024-11-15T08:27:14,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741962_1138 (size=8258) 2024-11-15T08:27:14,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741961_1137 (size=5354) 2024-11-15T08:27:14,512 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9/.tmp/cf/0d33b8c0467845068f460b82137eeed1 2024-11-15T08:27:14,515 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc/.tmp/cf/801de5da2e8548ffa60d6d60e8560e08 as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc/cf/801de5da2e8548ffa60d6d60e8560e08 2024-11-15T08:27:14,518 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9/.tmp/cf/0d33b8c0467845068f460b82137eeed1 as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9/cf/0d33b8c0467845068f460b82137eeed1 2024-11-15T08:27:14,520 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc/cf/801de5da2e8548ffa60d6d60e8560e08, entries=4, sequenceid=6, filesize=5.2 K 2024-11-15T08:27:14,521 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for be44a8fd2fcf207d87fc3ae62ea17bfc in 50ms, sequenceid=6, compaction requested=false 2024-11-15T08:27:14,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-11-15T08:27:14,522 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.HRegion(2603): Flush status journal for be44a8fd2fcf207d87fc3ae62ea17bfc: 2024-11-15T08:27:14,522 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. for snaptb0-testExportFileSystemState completed. 2024-11-15T08:27:14,522 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-15T08:27:14,522 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:27:14,522 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc/cf/801de5da2e8548ffa60d6d60e8560e08] hfiles 2024-11-15T08:27:14,522 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc/cf/801de5da2e8548ffa60d6d60e8560e08 for snapshot=snaptb0-testExportFileSystemState 2024-11-15T08:27:14,524 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9/cf/0d33b8c0467845068f460b82137eeed1, entries=46, sequenceid=6, filesize=8.1 K 2024-11-15T08:27:14,526 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 93ed4a685de0c7b0ec91c94ddfaf92e9 in 55ms, sequenceid=6, compaction requested=false 2024-11-15T08:27:14,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.HRegion(2603): Flush status journal for 93ed4a685de0c7b0ec91c94ddfaf92e9: 2024-11-15T08:27:14,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. for snaptb0-testExportFileSystemState completed. 2024-11-15T08:27:14,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-15T08:27:14,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:27:14,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9/cf/0d33b8c0467845068f460b82137eeed1] hfiles 2024-11-15T08:27:14,526 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9/cf/0d33b8c0467845068f460b82137eeed1 for snapshot=snaptb0-testExportFileSystemState 2024-11-15T08:27:14,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741963_1139 (size=110) 2024-11-15T08:27:14,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741963_1139 (size=110) 2024-11-15T08:27:14,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741963_1139 (size=110) 2024-11-15T08:27:14,530 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. 
2024-11-15T08:27:14,530 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=67}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=67 2024-11-15T08:27:14,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=67 2024-11-15T08:27:14,530 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:14,531 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=67, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:14,533 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=66, state=SUCCESS, hasLock=false; SnapshotRegionProcedure be44a8fd2fcf207d87fc3ae62ea17bfc in 213 msec 2024-11-15T08:27:14,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741964_1140 (size=110) 2024-11-15T08:27:14,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741964_1140 (size=110) 2024-11-15T08:27:14,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741964_1140 (size=110) 2024-11-15T08:27:14,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. 
2024-11-15T08:27:14,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-11-15T08:27:14,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=68 2024-11-15T08:27:14,539 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:14,539 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=68, ppid=66, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:14,542 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=68, resume processing ppid=66 2024-11-15T08:27:14,542 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, ppid=66, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 93ed4a685de0c7b0ec91c94ddfaf92e9 in 222 msec 2024-11-15T08:27:14,542 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:27:14,543 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:27:14,543 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:27:14,543 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-11-15T08:27:14,544 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-15T08:27:14,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741965_1141 (size=630) 2024-11-15T08:27:14,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741965_1141 (size=630) 2024-11-15T08:27:14,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741965_1141 (size=630) 2024-11-15T08:27:14,558 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:27:14,580 INFO [PEWorker-2 {}] 
procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:27:14,580 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-15T08:27:14,586 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=66, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:27:14,587 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 66 2024-11-15T08:27:14,591 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=66, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 283 msec 2024-11-15T08:27:14,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=66 2024-11-15T08:27:14,626 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-15T08:27:14,627 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659234627 2024-11-15T08:27:14,627 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:44023, tgtDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659234627, rawTgtDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659234627, srcFsUri=hdfs://localhost:44023, srcDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:27:14,668 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44023, inputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:27:14,668 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659234627, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659234627/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-15T08:27:14,671 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's 
expiration status and integrity. 2024-11-15T08:27:14,682 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659234627/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-15T08:27:14,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741967_1143 (size=630) 2024-11-15T08:27:14,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741967_1143 (size=630) 2024-11-15T08:27:14,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741967_1143 (size=630) 2024-11-15T08:27:14,716 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-15T08:27:14,716 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-15T08:27:14,716 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-15T08:27:14,717 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-15T08:27:14,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741966_1142 (size=165) 2024-11-15T08:27:14,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741966_1142 (size=165) 2024-11-15T08:27:14,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741966_1142 (size=165) 2024-11-15T08:27:14,721 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:14,722 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:14,722 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:15,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-168849710021470604.jar 
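The ExportSnapshot entries above cover the setup phase: resolving the source and target filesystems, verifying the source snapshot's expiration status and integrity, and copying the snapshot manifest into the target's .hbase-snapshot/.tmp directory; the TableMapReduceUtil jar-resolution entries around this point are the tool preparing its MapReduce job. A hedged sketch of driving the same tool programmatically is below; -snapshot, -copy-to and -mappers are the tool's documented options, while the destination URI and mapper count here are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Equivalent to: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //   -snapshot snaptb0-testExportFileSystemState -copy-to <target root dir> -mappers 2
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemState",
            "-copy-to", "hdfs://localhost:44023/user/jenkins/export-test", // illustrative target
            "-mappers", "2"
        });
        System.exit(rc);
      }
    }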
2024-11-15T08:27:15,778 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:15,779 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:15,847 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-13450143156110057973.jar 2024-11-15T08:27:15,848 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:15,848 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:15,848 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:15,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:15,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:15,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:15,849 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T08:27:15,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T08:27:15,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T08:27:15,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-15T08:27:15,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T08:27:15,850 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T08:27:15,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T08:27:15,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-15T08:27:15,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T08:27:15,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T08:27:15,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T08:27:15,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:27:15,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:27:15,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:27:15,853 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:27:15,853 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:27:15,853 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:27:15,853 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:27:15,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741968_1144 (size=131440) 2024-11-15T08:27:15,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741968_1144 (size=131440) 2024-11-15T08:27:15,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741968_1144 (size=131440) 2024-11-15T08:27:15,954 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0002_000001 (auth:SIMPLE) from 127.0.0.1:38066 2024-11-15T08:27:15,966 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0002/container_1731659181416_0002_01_000001/launch_container.sh] 2024-11-15T08:27:15,966 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0002/container_1731659181416_0002_01_000001/container_tokens] 2024-11-15T08:27:15,966 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0002/container_1731659181416_0002_01_000001/sysfs] 2024-11-15T08:27:15,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741969_1145 (size=4188619) 2024-11-15T08:27:15,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741969_1145 (size=4188619) 2024-11-15T08:27:15,981 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741969_1145 (size=4188619) 2024-11-15T08:27:15,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741970_1146 (size=1323991) 2024-11-15T08:27:15,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741970_1146 (size=1323991) 2024-11-15T08:27:15,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741970_1146 (size=1323991) 2024-11-15T08:27:16,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741971_1147 (size=903738) 2024-11-15T08:27:16,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741971_1147 (size=903738) 2024-11-15T08:27:16,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741971_1147 (size=903738) 2024-11-15T08:27:16,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741972_1148 (size=8360083) 2024-11-15T08:27:16,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741972_1148 (size=8360083) 2024-11-15T08:27:16,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741972_1148 (size=8360083) 2024-11-15T08:27:16,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741973_1149 (size=1877034) 2024-11-15T08:27:16,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741973_1149 (size=1877034) 2024-11-15T08:27:16,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741973_1149 (size=1877034) 2024-11-15T08:27:16,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741974_1150 (size=77835) 2024-11-15T08:27:16,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741974_1150 (size=77835) 2024-11-15T08:27:16,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741974_1150 (size=77835) 2024-11-15T08:27:16,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741975_1151 (size=30949) 2024-11-15T08:27:16,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741975_1151 (size=30949) 2024-11-15T08:27:16,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741975_1151 (size=30949) 2024-11-15T08:27:16,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741976_1152 (size=1597327) 
2024-11-15T08:27:16,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741976_1152 (size=1597327) 2024-11-15T08:27:16,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741976_1152 (size=1597327) 2024-11-15T08:27:16,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741977_1153 (size=4695811) 2024-11-15T08:27:16,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741977_1153 (size=4695811) 2024-11-15T08:27:16,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741977_1153 (size=4695811) 2024-11-15T08:27:16,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741978_1154 (size=440656) 2024-11-15T08:27:16,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741978_1154 (size=440656) 2024-11-15T08:27:16,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741978_1154 (size=440656) 2024-11-15T08:27:16,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741979_1155 (size=232957) 2024-11-15T08:27:16,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741979_1155 (size=232957) 2024-11-15T08:27:16,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741979_1155 (size=232957) 2024-11-15T08:27:16,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741980_1156 (size=127628) 2024-11-15T08:27:16,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741980_1156 (size=127628) 2024-11-15T08:27:16,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741980_1156 (size=127628) 2024-11-15T08:27:17,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741981_1157 (size=20406) 2024-11-15T08:27:17,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741981_1157 (size=20406) 2024-11-15T08:27:17,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741981_1157 (size=20406) 2024-11-15T08:27:17,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741982_1158 (size=5175431) 2024-11-15T08:27:17,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741982_1158 (size=5175431) 2024-11-15T08:27:17,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741982_1158 
(size=5175431) 2024-11-15T08:27:17,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741983_1159 (size=217634) 2024-11-15T08:27:17,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741983_1159 (size=217634) 2024-11-15T08:27:17,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741983_1159 (size=217634) 2024-11-15T08:27:17,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741984_1160 (size=1832290) 2024-11-15T08:27:17,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741984_1160 (size=1832290) 2024-11-15T08:27:17,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741984_1160 (size=1832290) 2024-11-15T08:27:17,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741985_1161 (size=322274) 2024-11-15T08:27:17,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741985_1161 (size=322274) 2024-11-15T08:27:17,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741985_1161 (size=322274) 2024-11-15T08:27:17,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741986_1162 (size=503880) 2024-11-15T08:27:17,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741986_1162 (size=503880) 2024-11-15T08:27:17,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741986_1162 (size=503880) 2024-11-15T08:27:17,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741987_1163 (size=29229) 2024-11-15T08:27:17,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741987_1163 (size=29229) 2024-11-15T08:27:17,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741987_1163 (size=29229) 2024-11-15T08:27:17,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741988_1164 (size=24096) 2024-11-15T08:27:17,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741988_1164 (size=24096) 2024-11-15T08:27:17,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741988_1164 (size=24096) 2024-11-15T08:27:17,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741989_1165 (size=6424751) 2024-11-15T08:27:17,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to 
blk_1073741989_1165 (size=6424751) 2024-11-15T08:27:17,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741989_1165 (size=6424751) 2024-11-15T08:27:17,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741990_1166 (size=111872) 2024-11-15T08:27:17,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741990_1166 (size=111872) 2024-11-15T08:27:17,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741990_1166 (size=111872) 2024-11-15T08:27:17,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741991_1167 (size=45609) 2024-11-15T08:27:17,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741991_1167 (size=45609) 2024-11-15T08:27:17,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741991_1167 (size=45609) 2024-11-15T08:27:17,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741992_1168 (size=136454) 2024-11-15T08:27:17,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741992_1168 (size=136454) 2024-11-15T08:27:17,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741992_1168 (size=136454) 2024-11-15T08:27:17,196 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
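The mapreduce.JobResourceUploader warning at the end of the entry above ("No job jar file set ... See Job or Job#setJar(String)") is expected here: the export job is submitted from inside the test JVM against a MiniMRCluster, so no job jar is staged. In a standalone client the warning is normally avoided by attaching a jar, roughly as in this sketch (class and job names are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    // Sketch: attach a job jar so task JVMs can load the user classes.
    public final class JobJarExample {
      public static Job newJob(Configuration conf) throws java.io.IOException {
        Job job = Job.getInstance(conf, "example-job");   // illustrative job name
        job.setJarByClass(JobJarExample.class);           // infer the jar from a class it contains
        // job.setJar("/path/to/job.jar");                // alternative: point at an explicit jar
        return job;
      }
    }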
2024-11-15T08:27:17,199 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-11-15T08:27:17,201 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-15T08:27:17,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741993_1169 (size=344) 2024-11-15T08:27:17,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741993_1169 (size=344) 2024-11-15T08:27:17,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741993_1169 (size=344) 2024-11-15T08:27:17,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741994_1170 (size=15) 2024-11-15T08:27:17,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741994_1170 (size=15) 2024-11-15T08:27:17,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741994_1170 (size=15) 2024-11-15T08:27:17,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741995_1171 (size=303738) 2024-11-15T08:27:17,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741995_1171 (size=303738) 2024-11-15T08:27:17,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741995_1171 (size=303738) 2024-11-15T08:27:17,261 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T08:27:17,261 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T08:27:17,508 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T08:27:17,931 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0003_000001 (auth:SIMPLE) from 127.0.0.1:38076 2024-11-15T08:27:17,990 DEBUG [master/48e8ad8dc936:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 93ed4a685de0c7b0ec91c94ddfaf92e9 changed from -1.0 to 0.0, refreshing cache 2024-11-15T08:27:17,991 DEBUG [master/48e8ad8dc936:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region be44a8fd2fcf207d87fc3ae62ea17bfc changed from -1.0 to 0.0, refreshing cache 2024-11-15T08:27:17,991 DEBUG [master/48e8ad8dc936:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 626e7c98f2efa065104d27dc4f61e6c7 changed from -1.0 to 0.0, refreshing cache 2024-11-15T08:27:24,304 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0003_000001 (auth:SIMPLE) from 127.0.0.1:48372 2024-11-15T08:27:24,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741996_1172 (size=349388) 2024-11-15T08:27:24,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741996_1172 (size=349388) 2024-11-15T08:27:24,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741996_1172 (size=349388) 2024-11-15T08:27:26,562 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0003_000001 (auth:SIMPLE) from 127.0.0.1:49256 2024-11-15T08:27:30,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741997_1173 (size=8258) 2024-11-15T08:27:30,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741997_1173 (size=8258) 2024-11-15T08:27:30,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741997_1173 (size=8258) 2024-11-15T08:27:30,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741998_1174 (size=5354) 2024-11-15T08:27:30,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741998_1174 (size=5354) 2024-11-15T08:27:30,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741998_1174 (size=5354) 2024-11-15T08:27:30,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741999_1175 (size=17422) 2024-11-15T08:27:30,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741999_1175 (size=17422) 2024-11-15T08:27:30,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741999_1175 (size=17422) 
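The two capacity.AbstractLeafQueue warnings above mean the CapacityScheduler's per-queue ApplicationMaster headroom is smaller than a single AM, so the limit is skipped rather than enforced. That headroom is governed by yarn.scheduler.capacity.maximum-am-resource-percent; a sketch of raising it in a test-owned Configuration (0.5 is an arbitrary example value) would be:

    import org.apache.hadoop.conf.Configuration;

    // Sketch: let ApplicationMasters use up to 50% of a queue's resources.
    public final class AmHeadroomExample {
      public static Configuration withAmHeadroom(Configuration base) {
        Configuration conf = new Configuration(base);
        conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
        return conf;
      }
    }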
2024-11-15T08:27:30,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742000_1176 (size=465) 2024-11-15T08:27:30,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742000_1176 (size=465) 2024-11-15T08:27:30,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742000_1176 (size=465) 2024-11-15T08:27:30,574 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_0/usercache/jenkins/appcache/application_1731659181416_0003/container_1731659181416_0003_01_000002/launch_container.sh] 2024-11-15T08:27:30,574 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_0/usercache/jenkins/appcache/application_1731659181416_0003/container_1731659181416_0003_01_000002/container_tokens] 2024-11-15T08:27:30,575 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_0/usercache/jenkins/appcache/application_1731659181416_0003/container_1731659181416_0003_01_000002/sysfs] 2024-11-15T08:27:30,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742001_1177 (size=17422) 2024-11-15T08:27:30,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742001_1177 (size=17422) 2024-11-15T08:27:30,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742001_1177 (size=17422) 2024-11-15T08:27:30,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742002_1178 (size=349388) 2024-11-15T08:27:30,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742002_1178 (size=349388) 2024-11-15T08:27:30,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742002_1178 (size=349388) 2024-11-15T08:27:30,621 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0003_000001 (auth:SIMPLE) from 127.0.0.1:49264 2024-11-15T08:27:32,411 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-15T08:27:32,413 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
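Between the "Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list" entry further up and the "Finalize the Snapshot Export" / verification entries above, the MapReduce job copied the snapshot files to the export location. Outside this test harness the same tool is usually driven through ToolRunner; a minimal sketch (destination URI and mapper count are placeholders, option names as accepted by the ExportSnapshot command line) is:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    // Sketch: export an existing snapshot to another filesystem root.
    public final class ExportSnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "--snapshot", "snaptb0-testExportFileSystemState",
            "--copy-to", "hdfs://target-namenode:8020/hbase",   // placeholder destination
            "--mappers", "1"
        });
        System.exit(rc);
      }
    }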
2024-11-15T08:27:32,420 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemState 2024-11-15T08:27:32,420 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-15T08:27:32,421 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-15T08:27:32,421 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-15T08:27:32,421 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-15T08:27:32,421 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-15T08:27:32,421 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659234627/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659234627/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-15T08:27:32,422 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659234627/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-15T08:27:32,422 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659234627/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-15T08:27:32,428 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-11-15T08:27:32,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=69, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-11-15T08:27:32,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-15T08:27:32,432 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659252432"}]},"ts":"1731659252432"} 2024-11-15T08:27:32,434 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-11-15T08:27:32,434 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-11-15T08:27:32,435 INFO [PEWorker-5 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-11-15T08:27:32,436 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=be44a8fd2fcf207d87fc3ae62ea17bfc, UNASSIGN}, {pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=93ed4a685de0c7b0ec91c94ddfaf92e9, UNASSIGN}] 2024-11-15T08:27:32,437 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=93ed4a685de0c7b0ec91c94ddfaf92e9, UNASSIGN 2024-11-15T08:27:32,437 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=be44a8fd2fcf207d87fc3ae62ea17bfc, UNASSIGN 2024-11-15T08:27:32,438 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=72 updating hbase:meta row=93ed4a685de0c7b0ec91c94ddfaf92e9, regionState=CLOSING, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:27:32,438 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=71 updating hbase:meta row=be44a8fd2fcf207d87fc3ae62ea17bfc, regionState=CLOSING, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:27:32,440 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=72, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=93ed4a685de0c7b0ec91c94ddfaf92e9, UNASSIGN because future has completed 2024-11-15T08:27:32,441 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:27:32,441 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=73, ppid=72, state=RUNNABLE, hasLock=false; CloseRegionProcedure 93ed4a685de0c7b0ec91c94ddfaf92e9, server=48e8ad8dc936,34555,1731659175344}] 2024-11-15T08:27:32,441 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=71, ppid=70, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=be44a8fd2fcf207d87fc3ae62ea17bfc, UNASSIGN because future has completed 2024-11-15T08:27:32,442 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:27:32,442 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=74, ppid=71, state=RUNNABLE, hasLock=false; CloseRegionProcedure be44a8fd2fcf207d87fc3ae62ea17bfc, server=48e8ad8dc936,37511,1731659175104}] 2024-11-15T08:27:32,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-15T08:27:32,593 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 
{event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(122): Close 93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:32,593 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:27:32,594 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1722): Closing 93ed4a685de0c7b0ec91c94ddfaf92e9, disabling compactions & flushes 2024-11-15T08:27:32,594 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. 2024-11-15T08:27:32,594 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. 2024-11-15T08:27:32,594 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. after waiting 0 ms 2024-11-15T08:27:32,594 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. 2024-11-15T08:27:32,594 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(122): Close be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:32,594 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:27:32,594 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1722): Closing be44a8fd2fcf207d87fc3ae62ea17bfc, disabling compactions & flushes 2024-11-15T08:27:32,594 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. 2024-11-15T08:27:32,594 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. 2024-11-15T08:27:32,594 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. after waiting 0 ms 2024-11-15T08:27:32,595 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. 
2024-11-15T08:27:32,599 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T08:27:32,599 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T08:27:32,600 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:27:32,600 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc. 2024-11-15T08:27:32,600 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] regionserver.HRegion(1676): Region close journal for be44a8fd2fcf207d87fc3ae62ea17bfc: Waiting for close lock at 1731659252594Running coprocessor pre-close hooks at 1731659252594Disabling compacts and flushes for region at 1731659252594Disabling writes for close at 1731659252594Writing region close event to WAL at 1731659252595 (+1 ms)Running coprocessor post-close hooks at 1731659252600 (+5 ms)Closed at 1731659252600 2024-11-15T08:27:32,600 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:27:32,600 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9. 
2024-11-15T08:27:32,600 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] regionserver.HRegion(1676): Region close journal for 93ed4a685de0c7b0ec91c94ddfaf92e9: Waiting for close lock at 1731659252594Running coprocessor pre-close hooks at 1731659252594Disabling compacts and flushes for region at 1731659252594Disabling writes for close at 1731659252594Writing region close event to WAL at 1731659252594Running coprocessor post-close hooks at 1731659252600 (+6 ms)Closed at 1731659252600 2024-11-15T08:27:32,602 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=74}] handler.UnassignRegionHandler(157): Closed be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:32,602 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=71 updating hbase:meta row=be44a8fd2fcf207d87fc3ae62ea17bfc, regionState=CLOSED 2024-11-15T08:27:32,602 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=73}] handler.UnassignRegionHandler(157): Closed 93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:32,603 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=72 updating hbase:meta row=93ed4a685de0c7b0ec91c94ddfaf92e9, regionState=CLOSED 2024-11-15T08:27:32,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=74, ppid=71, state=RUNNABLE, hasLock=false; CloseRegionProcedure be44a8fd2fcf207d87fc3ae62ea17bfc, server=48e8ad8dc936,37511,1731659175104 because future has completed 2024-11-15T08:27:32,606 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=74, resume processing ppid=71 2024-11-15T08:27:32,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, ppid=71, state=SUCCESS, hasLock=false; CloseRegionProcedure be44a8fd2fcf207d87fc3ae62ea17bfc, server=48e8ad8dc936,37511,1731659175104 in 163 msec 2024-11-15T08:27:32,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=73, ppid=72, state=RUNNABLE, hasLock=false; CloseRegionProcedure 93ed4a685de0c7b0ec91c94ddfaf92e9, server=48e8ad8dc936,34555,1731659175344 because future has completed 2024-11-15T08:27:32,608 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, ppid=70, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=be44a8fd2fcf207d87fc3ae62ea17bfc, UNASSIGN in 171 msec 2024-11-15T08:27:32,609 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=72 2024-11-15T08:27:32,609 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=72, state=SUCCESS, hasLock=false; CloseRegionProcedure 93ed4a685de0c7b0ec91c94ddfaf92e9, server=48e8ad8dc936,34555,1731659175344 in 167 msec 2024-11-15T08:27:32,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=72, resume processing ppid=70 2024-11-15T08:27:32,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=70, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=93ed4a685de0c7b0ec91c94ddfaf92e9, UNASSIGN in 173 msec 2024-11-15T08:27:32,613 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=70, resume processing ppid=69 2024-11-15T08:27:32,613 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=69, 
state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 177 msec 2024-11-15T08:27:32,615 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659252614"}]},"ts":"1731659252614"} 2024-11-15T08:27:32,617 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-11-15T08:27:32,617 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-11-15T08:27:32,618 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 188 msec 2024-11-15T08:27:32,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=69 2024-11-15T08:27:32,746 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-15T08:27:32,747 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-11-15T08:27:32,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=75, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-15T08:27:32,749 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=75, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-15T08:27:32,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-11-15T08:27:32,749 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=75, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-15T08:27:32,752 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-11-15T08:27:32,754 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:32,754 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:32,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-15T08:27:32,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, 
state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-15T08:27:32,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-15T08:27:32,756 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9/recovered.edits] 2024-11-15T08:27:32,756 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc/recovered.edits] 2024-11-15T08:27:32,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-15T08:27:32,757 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-15T08:27:32,757 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-15T08:27:32,757 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-15T08:27:32,757 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-15T08:27:32,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-15T08:27:32,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-15T08:27:32,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:32,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-15T08:27:32,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:32,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-15T08:27:32,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:32,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:32,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=75 2024-11-15T08:27:32,761 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9/cf/0d33b8c0467845068f460b82137eeed1 to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9/cf/0d33b8c0467845068f460b82137eeed1 2024-11-15T08:27:32,764 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc/cf/801de5da2e8548ffa60d6d60e8560e08 to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc/cf/801de5da2e8548ffa60d6d60e8560e08 2024-11-15T08:27:32,764 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9/recovered.edits/9.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9/recovered.edits/9.seqid 2024-11-15T08:27:32,765 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/93ed4a685de0c7b0ec91c94ddfaf92e9 2024-11-15T08:27:32,767 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc/recovered.edits/9.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc/recovered.edits/9.seqid 2024-11-15T08:27:32,768 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemState/be44a8fd2fcf207d87fc3ae62ea17bfc 2024-11-15T08:27:32,768 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-11-15T08:27:32,770 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=75, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-15T08:27:32,774 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-11-15T08:27:32,777 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-11-15T08:27:32,779 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=75, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-15T08:27:32,779 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 2024-11-15T08:27:32,779 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659252779"}]},"ts":"9223372036854775807"} 2024-11-15T08:27:32,779 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659252779"}]},"ts":"9223372036854775807"} 2024-11-15T08:27:32,782 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T08:27:32,782 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => be44a8fd2fcf207d87fc3ae62ea17bfc, NAME => 'testtb-testExportFileSystemState,,1731659232759.be44a8fd2fcf207d87fc3ae62ea17bfc.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 93ed4a685de0c7b0ec91c94ddfaf92e9, NAME => 'testtb-testExportFileSystemState,1,1731659232759.93ed4a685de0c7b0ec91c94ddfaf92e9.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T08:27:32,782 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 
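The DisableTableProcedure (pid=69) and DeleteTableProcedure (pid=75) entries above, together with the snapshot deletions that follow just below, are the server-side view of routine Admin calls issued from the test client. A client-side sketch of the same teardown, assuming an already-open Connection, is roughly:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    // Sketch: disable and drop the test table, then remove its snapshots.
    public final class TeardownExample {
      public static void teardown(Connection conn) throws java.io.IOException {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
        try (Admin admin = conn.getAdmin()) {
          if (admin.isTableEnabled(tn)) {
            admin.disableTable(tn);
          }
          admin.deleteTable(tn);
          admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
          admin.deleteSnapshot("snaptb0-testExportFileSystemState");
        }
      }
    }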
2024-11-15T08:27:32,782 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731659252782"}]},"ts":"9223372036854775807"} 2024-11-15T08:27:32,784 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-11-15T08:27:32,784 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=75, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-15T08:27:32,785 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 37 msec 2024-11-15T08:27:32,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=75 2024-11-15T08:27:32,867 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-11-15T08:27:32,867 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-15T08:27:32,875 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-11-15T08:27:32,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-11-15T08:27:32,878 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-11-15T08:27:32,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-11-15T08:27:32,904 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=786 (was 786), OpenFileDescriptor=801 (was 803), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=402 (was 413), ProcessCount=19 (was 19), AvailableMemoryMB=1099 (was 1199) 2024-11-15T08:27:32,905 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=786 is superior to 500 2024-11-15T08:27:32,921 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=786, OpenFileDescriptor=801, MaxFileDescriptor=1048576, SystemLoadAverage=402, ProcessCount=19, AvailableMemoryMB=1099 2024-11-15T08:27:32,921 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=786 is superior to 500 2024-11-15T08:27:32,923 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T08:27:32,924 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=76, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-11-15T08:27:32,925 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T08:27:32,925 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:27:32,925 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 76 2024-11-15T08:27:32,926 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T08:27:32,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-15T08:27:32,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742003_1179 (size=404) 2024-11-15T08:27:32,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742003_1179 (size=404) 2024-11-15T08:27:32,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742003_1179 (size=404) 2024-11-15T08:27:32,936 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 80794e360336d739c4803cc7ed8cd919, NAME => 'testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:27:32,936 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 84fc98286288801ee1d5ff09c3365f53, NAME => 'testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, 
regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:27:32,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742004_1180 (size=65) 2024-11-15T08:27:32,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742005_1181 (size=65) 2024-11-15T08:27:32,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742005_1181 (size=65) 2024-11-15T08:27:32,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742005_1181 (size=65) 2024-11-15T08:27:32,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742004_1180 (size=65) 2024-11-15T08:27:32,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742004_1180 (size=65) 2024-11-15T08:27:32,948 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:27:32,948 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:27:32,948 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 80794e360336d739c4803cc7ed8cd919, disabling compactions & flushes 2024-11-15T08:27:32,948 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 84fc98286288801ee1d5ff09c3365f53, disabling compactions & flushes 2024-11-15T08:27:32,948 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. 2024-11-15T08:27:32,948 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. 2024-11-15T08:27:32,948 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. 2024-11-15T08:27:32,948 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. 2024-11-15T08:27:32,948 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. after waiting 0 ms 2024-11-15T08:27:32,949 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. 
2024-11-15T08:27:32,949 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. after waiting 0 ms 2024-11-15T08:27:32,949 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. 2024-11-15T08:27:32,949 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. 2024-11-15T08:27:32,949 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. 2024-11-15T08:27:32,949 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 80794e360336d739c4803cc7ed8cd919: Waiting for close lock at 1731659252948Disabling compacts and flushes for region at 1731659252948Disabling writes for close at 1731659252948Writing region close event to WAL at 1731659252949 (+1 ms)Closed at 1731659252949 2024-11-15T08:27:32,949 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 84fc98286288801ee1d5ff09c3365f53: Waiting for close lock at 1731659252948Disabling compacts and flushes for region at 1731659252948Disabling writes for close at 1731659252949 (+1 ms)Writing region close event to WAL at 1731659252949Closed at 1731659252949 2024-11-15T08:27:32,950 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T08:27:32,950 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731659252950"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659252950"}]},"ts":"1731659252950"} 2024-11-15T08:27:32,950 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731659252950"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659252950"}]},"ts":"1731659252950"} 2024-11-15T08:27:32,953 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
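The CreateTableProcedure (pid=76) entries above build 'testtb-testConsecutiveExports' with REGION_REPLICATION '1', a single 'cf' family (VERSIONS '1', BLOCKSIZE 65536), and two regions split at row key '1'. A client-side sketch of an equivalent create using the public descriptor builders, assuming an open Admin, is:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch: create a two-region table with one 'cf' family, split at row key "1".
    public final class CreateTableExample {
      public static void create(Admin admin) throws java.io.IOException {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testConsecutiveExports"))
            .setRegionReplication(1)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)
                .setBlocksize(65536)
                .build())
            .build();
        admin.createTable(td, new byte[][] { Bytes.toBytes("1") });  // regions: ''-'1' and '1'-''
      }
    }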
2024-11-15T08:27:32,953 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T08:27:32,954 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659252954"}]},"ts":"1731659252954"} 2024-11-15T08:27:32,955 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-11-15T08:27:32,956 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {48e8ad8dc936=0} racks are {/default-rack=0} 2024-11-15T08:27:32,957 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T08:27:32,957 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T08:27:32,957 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T08:27:32,957 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T08:27:32,957 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T08:27:32,957 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T08:27:32,957 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T08:27:32,957 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T08:27:32,957 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T08:27:32,957 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T08:27:32,958 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=80794e360336d739c4803cc7ed8cd919, ASSIGN}, {pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=84fc98286288801ee1d5ff09c3365f53, ASSIGN}] 2024-11-15T08:27:32,958 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=84fc98286288801ee1d5ff09c3365f53, ASSIGN 2024-11-15T08:27:32,959 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=80794e360336d739c4803cc7ed8cd919, ASSIGN 2024-11-15T08:27:32,959 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=80794e360336d739c4803cc7ed8cd919, ASSIGN; state=OFFLINE, location=48e8ad8dc936,40637,1731659175269; forceNewPlan=false, retain=false 2024-11-15T08:27:32,959 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=78, ppid=76, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=84fc98286288801ee1d5ff09c3365f53, ASSIGN; state=OFFLINE, location=48e8ad8dc936,37511,1731659175104; forceNewPlan=false, retain=false 2024-11-15T08:27:33,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-15T08:27:33,110 INFO [48e8ad8dc936:35169 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-15T08:27:33,110 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=77 updating hbase:meta row=80794e360336d739c4803cc7ed8cd919, regionState=OPENING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:27:33,110 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=78 updating hbase:meta row=84fc98286288801ee1d5ff09c3365f53, regionState=OPENING, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:27:33,112 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=80794e360336d739c4803cc7ed8cd919, ASSIGN because future has completed 2024-11-15T08:27:33,113 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=79, ppid=77, state=RUNNABLE, hasLock=false; OpenRegionProcedure 80794e360336d739c4803cc7ed8cd919, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:27:33,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=76, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=84fc98286288801ee1d5ff09c3365f53, ASSIGN because future has completed 2024-11-15T08:27:33,114 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=78, state=RUNNABLE, hasLock=false; OpenRegionProcedure 84fc98286288801ee1d5ff09c3365f53, server=48e8ad8dc936,37511,1731659175104}] 2024-11-15T08:27:33,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-15T08:27:33,269 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. 2024-11-15T08:27:33,269 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. 
2024-11-15T08:27:33,269 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7752): Opening region: {ENCODED => 84fc98286288801ee1d5ff09c3365f53, NAME => 'testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T08:27:33,269 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7752): Opening region: {ENCODED => 80794e360336d739c4803cc7ed8cd919, NAME => 'testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T08:27:33,269 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. service=AccessControlService 2024-11-15T08:27:33,270 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T08:27:33,270 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. service=AccessControlService 2024-11-15T08:27:33,270 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:27:33,270 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:27:33,270 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T08:27:33,270 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7794): checking encryption for 84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:27:33,270 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(7797): checking classloading for 84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:27:33,270 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 80794e360336d739c4803cc7ed8cd919 2024-11-15T08:27:33,270 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:27:33,270 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7794): checking encryption for 80794e360336d739c4803cc7ed8cd919 2024-11-15T08:27:33,270 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(7797): checking classloading for 80794e360336d739c4803cc7ed8cd919 2024-11-15T08:27:33,271 INFO [StoreOpener-84fc98286288801ee1d5ff09c3365f53-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:27:33,272 INFO [StoreOpener-80794e360336d739c4803cc7ed8cd919-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 80794e360336d739c4803cc7ed8cd919 2024-11-15T08:27:33,273 INFO [StoreOpener-84fc98286288801ee1d5ff09c3365f53-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 84fc98286288801ee1d5ff09c3365f53 columnFamilyName cf 2024-11-15T08:27:33,273 DEBUG [StoreOpener-84fc98286288801ee1d5ff09c3365f53-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:27:33,273 INFO [StoreOpener-80794e360336d739c4803cc7ed8cd919-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major 
jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 80794e360336d739c4803cc7ed8cd919 columnFamilyName cf 2024-11-15T08:27:33,273 INFO [StoreOpener-84fc98286288801ee1d5ff09c3365f53-1 {}] regionserver.HStore(327): Store=84fc98286288801ee1d5ff09c3365f53/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:27:33,273 DEBUG [StoreOpener-80794e360336d739c4803cc7ed8cd919-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:27:33,274 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1038): replaying wal for 84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:27:33,274 INFO [StoreOpener-80794e360336d739c4803cc7ed8cd919-1 {}] regionserver.HStore(327): Store=80794e360336d739c4803cc7ed8cd919/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:27:33,274 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1038): replaying wal for 80794e360336d739c4803cc7ed8cd919 2024-11-15T08:27:33,274 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:27:33,275 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:27:33,275 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919 2024-11-15T08:27:33,275 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1048): stopping wal replay for 84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:27:33,275 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1060): Cleaning up temporary data for 84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:27:33,275 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919 2024-11-15T08:27:33,276 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] 
regionserver.HRegion(1048): stopping wal replay for 80794e360336d739c4803cc7ed8cd919 2024-11-15T08:27:33,276 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1060): Cleaning up temporary data for 80794e360336d739c4803cc7ed8cd919 2024-11-15T08:27:33,277 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1093): writing seq id for 84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:27:33,277 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1093): writing seq id for 80794e360336d739c4803cc7ed8cd919 2024-11-15T08:27:33,279 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:27:33,279 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:27:33,279 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1114): Opened 84fc98286288801ee1d5ff09c3365f53; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68731671, jitterRate=0.024181708693504333}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:27:33,279 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:27:33,279 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1114): Opened 80794e360336d739c4803cc7ed8cd919; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70017537, jitterRate=0.043342605233192444}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:27:33,280 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 80794e360336d739c4803cc7ed8cd919 2024-11-15T08:27:33,280 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegion(1006): Region open journal for 80794e360336d739c4803cc7ed8cd919: Running coprocessor pre-open hook at 1731659253270Writing region info on filesystem at 1731659253270Initializing all the Stores at 1731659253272 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659253272Cleaning up temporary data from old regions at 1731659253276 (+4 ms)Running coprocessor post-open hooks at 1731659253280 (+4 ms)Region 
opened successfully at 1731659253280 2024-11-15T08:27:33,280 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegion(1006): Region open journal for 84fc98286288801ee1d5ff09c3365f53: Running coprocessor pre-open hook at 1731659253270Writing region info on filesystem at 1731659253270Initializing all the Stores at 1731659253271 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659253271Cleaning up temporary data from old regions at 1731659253275 (+4 ms)Running coprocessor post-open hooks at 1731659253279 (+4 ms)Region opened successfully at 1731659253280 (+1 ms) 2024-11-15T08:27:33,281 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53., pid=80, masterSystemTime=1731659253266 2024-11-15T08:27:33,281 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919., pid=79, masterSystemTime=1731659253265 2024-11-15T08:27:33,283 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. 2024-11-15T08:27:33,283 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=80}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. 2024-11-15T08:27:33,283 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=78 updating hbase:meta row=84fc98286288801ee1d5ff09c3365f53, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:27:33,284 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. 2024-11-15T08:27:33,284 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=79}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. 
2024-11-15T08:27:33,285 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=77 updating hbase:meta row=80794e360336d739c4803cc7ed8cd919, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:27:33,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=80, ppid=78, state=RUNNABLE, hasLock=false; OpenRegionProcedure 84fc98286288801ee1d5ff09c3365f53, server=48e8ad8dc936,37511,1731659175104 because future has completed 2024-11-15T08:27:33,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=79, ppid=77, state=RUNNABLE, hasLock=false; OpenRegionProcedure 80794e360336d739c4803cc7ed8cd919, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:27:33,289 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=80, resume processing ppid=78 2024-11-15T08:27:33,289 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=78, state=SUCCESS, hasLock=false; OpenRegionProcedure 84fc98286288801ee1d5ff09c3365f53, server=48e8ad8dc936,37511,1731659175104 in 173 msec 2024-11-15T08:27:33,290 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=79, resume processing ppid=77 2024-11-15T08:27:33,290 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, ppid=77, state=SUCCESS, hasLock=false; OpenRegionProcedure 80794e360336d739c4803cc7ed8cd919, server=48e8ad8dc936,40637,1731659175269 in 175 msec 2024-11-15T08:27:33,291 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=76, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=84fc98286288801ee1d5ff09c3365f53, ASSIGN in 332 msec 2024-11-15T08:27:33,292 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=76 2024-11-15T08:27:33,292 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=76, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=80794e360336d739c4803cc7ed8cd919, ASSIGN in 333 msec 2024-11-15T08:27:33,293 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T08:27:33,293 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659253293"}]},"ts":"1731659253293"} 2024-11-15T08:27:33,295 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-11-15T08:27:33,295 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=76, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T08:27:33,296 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-11-15T08:27:33,299 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-15T08:27:33,304 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:33,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:33,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:33,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:27:33,307 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-15T08:27:33,307 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-15T08:27:33,307 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-15T08:27:33,307 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-15T08:27:33,309 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 384 msec 2024-11-15T08:27:33,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=76 2024-11-15T08:27:33,556 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-15T08:27:33,556 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-11-15T08:27:33,557 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:27:33,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-11-15T08:27:33,560 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:27:33,560 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testConsecutiveExports assigned. 
2024-11-15T08:27:33,561 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-15T08:27:33,563 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-15T08:27:33,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659253563 (current time:1731659253563). 2024-11-15T08:27:33,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:27:33,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-15T08:27:33,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:27:33,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ae7df19, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:33,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:27:33,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:27:33,566 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:27:33,566 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:27:33,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:27:33,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55d4dce7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:33,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:27:33,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:27:33,567 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:33,568 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:39228, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:27:33,569 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3640ded6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:33,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:27:33,570 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:27:33,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:27:33,571 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37044, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:27:33,573 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 2024-11-15T08:27:33,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:27:33,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:33,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:33,573 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T08:27:33,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74ee3e3c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:33,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:27:33,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:27:33,575 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:27:33,575 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:27:33,575 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:27:33,575 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6faf75f4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:33,575 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:27:33,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:27:33,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:33,576 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39254, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:27:33,577 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e414649, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:33,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:27:33,579 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:27:33,579 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:27:33,580 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37054, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T08:27:33,582 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:27:33,583 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 2024-11-15T08:27:33,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor239.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:27:33,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:33,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:33,584 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T08:27:33,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-15T08:27:33,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T08:27:33,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=81, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-15T08:27:33,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 81 2024-11-15T08:27:33,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-15T08:27:33,587 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:27:33,588 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:27:33,590 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:27:33,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742006_1182 (size=161) 2024-11-15T08:27:33,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742006_1182 (size=161) 2024-11-15T08:27:33,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742006_1182 (size=161) 2024-11-15T08:27:33,598 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:27:33,598 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 80794e360336d739c4803cc7ed8cd919}, {pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 84fc98286288801ee1d5ff09c3365f53}] 2024-11-15T08:27:33,599 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=82, ppid=81, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure 80794e360336d739c4803cc7ed8cd919 2024-11-15T08:27:33,599 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:27:33,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-15T08:27:33,751 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40637 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=82 2024-11-15T08:27:33,751 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=83 2024-11-15T08:27:33,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. 2024-11-15T08:27:33,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. 2024-11-15T08:27:33,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.HRegion(2603): Flush status journal for 84fc98286288801ee1d5ff09c3365f53: 2024-11-15T08:27:33,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.HRegion(2603): Flush status journal for 80794e360336d739c4803cc7ed8cd919: 2024-11-15T08:27:33,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. for emptySnaptb0-testConsecutiveExports completed. 2024-11-15T08:27:33,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. for emptySnaptb0-testConsecutiveExports completed. 2024-11-15T08:27:33,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-15T08:27:33,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:27:33,751 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-15T08:27:33,752 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T08:27:33,752 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:27:33,752 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T08:27:33,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742007_1183 (size=68) 2024-11-15T08:27:33,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742007_1183 (size=68) 2024-11-15T08:27:33,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742008_1184 (size=68) 2024-11-15T08:27:33,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742008_1184 (size=68) 2024-11-15T08:27:33,759 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. 2024-11-15T08:27:33,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742007_1183 (size=68) 2024-11-15T08:27:33,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742008_1184 (size=68) 2024-11-15T08:27:33,759 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-15T08:27:33,759 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. 
2024-11-15T08:27:33,759 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=83}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=83 2024-11-15T08:27:33,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=83 2024-11-15T08:27:33,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=82 2024-11-15T08:27:33,760 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:27:33,760 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 80794e360336d739c4803cc7ed8cd919 2024-11-15T08:27:33,760 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=82, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 80794e360336d739c4803cc7ed8cd919 2024-11-15T08:27:33,760 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=83, ppid=81, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:27:33,763 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, ppid=81, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 80794e360336d739c4803cc7ed8cd919 in 163 msec 2024-11-15T08:27:33,764 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=81 2024-11-15T08:27:33,764 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=81, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 84fc98286288801ee1d5ff09c3365f53 in 163 msec 2024-11-15T08:27:33,764 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:27:33,764 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:27:33,765 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:27:33,765 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-11-15T08:27:33,766 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-11-15T08:27:33,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to 
blk_1073742009_1185 (size=543) 2024-11-15T08:27:33,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742009_1185 (size=543) 2024-11-15T08:27:33,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742009_1185 (size=543) 2024-11-15T08:27:33,777 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:27:33,781 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:27:33,781 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-11-15T08:27:33,782 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=81, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:27:33,782 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 81 2024-11-15T08:27:33,784 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=81, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 198 msec 2024-11-15T08:27:33,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=81 2024-11-15T08:27:33,906 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-15T08:27:33,910 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='0d30c77101f5fa031f46567c9e7a30cb1', locateType=CURRENT is [region=testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:27:33,911 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='1eaa774746a7b44c4acf80b822151a009', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:27:33,912 DEBUG 
[RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='2888904c24bc805b52002fafb3262f78e', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:27:33,914 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='3f1992b0022645f329d8d43278ff600e3', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:27:33,914 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='48fe825a78e64b0e84ed97a16114c8399', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:27:33,915 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='597d42672bb3b9c33b52c4d38a009c250', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:27:33,916 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='6d6b1fe850440a8568b8f58a0038b6084', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:27:33,916 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40637 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:27:33,918 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37511 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:27:33,919 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-15T08:27:33,921 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-15T08:27:33,921 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. 
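Note: the HRegion(8528) warnings just above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") are what a RegionServer logs when a client writes with WAL durability switched off. The following is only an illustrative sketch of how a test client might issue such a write, not the test's actual code; the connection settings come from whatever hbase-site.xml is on the classpath, the row key and value are hypothetical, and the column family/qualifier "cf:q" are taken from the flush paths later in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testConsecutiveExports"))) {
      Put put = new Put(Bytes.toBytes("row-0"));         // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);            // this is what triggers the "WAL disabled" warning above
      table.put(put);
    }
  }
}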
2024-11-15T08:27:33,922 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:27:33,923 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-15T08:27:33,928 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-15T08:27:33,933 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-15T08:27:33,936 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-15T08:27:33,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659253936 (current time:1731659253936). 2024-11-15T08:27:33,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:27:33,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-15T08:27:33,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:27:33,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@639ff086, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:33,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:27:33,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:27:33,938 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:27:33,938 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:27:33,938 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:27:33,938 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6268d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
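Note: the MasterRpcServices(1763) entry above ("snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }") is the master-side view of a client Admin.snapshot() call. A minimal sketch of issuing that request is shown below, assuming default client configuration; for an enabled table this call typically produces a FLUSH-type snapshot like the one recorded here. This is a reconstruction for illustration, not the test's source.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotRequestExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure completes, which is why the log
      // shows repeated "Checking to see if procedure is done" polling from the handler.
      admin.snapshot("snaptb0-testConsecutiveExports",
                     TableName.valueOf("testtb-testConsecutiveExports"));
    }
  }
}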
2024-11-15T08:27:33,938 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:27:33,939 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:27:33,939 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:33,939 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39280, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:27:33,940 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4697ddaf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:33,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:27:33,941 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:27:33,941 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:27:33,942 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37058, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:27:33,943 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 
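Note: the ConnectionUtils/AsyncNonMetaRegionLocator entries above show the client resolving the hbase:meta location from the connection registry and then looking up per-row region locations before routing requests. The sketch below performs an equivalent lookup through the public RegionLocator API, assuming default configuration; the row key is hypothetical and chosen only because the log shows rows in the "1..." range landing on the second region.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLocationExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(
             TableName.valueOf("testtb-testConsecutiveExports"))) {
      // Resolves which region (and RegionServer) hosts the given row,
      // mirroring the "The fetched location of ..." DEBUG lines in this log.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("1"));
      System.out.println(loc.getRegion().getRegionNameAsString() + " on " + loc.getServerName());
    }
  }
}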
2024-11-15T08:27:33,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:27:33,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:33,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:33,943 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:27:33,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dd1c471, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:33,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:27:33,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:27:33,945 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:27:33,945 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:27:33,945 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:27:33,945 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22158456, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:33,945 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:27:33,946 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:27:33,946 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:33,946 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39288, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:27:33,947 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58526790, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:27:33,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:27:33,948 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:27:33,948 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:27:33,949 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37066, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:27:33,951 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:27:33,953 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 
2024-11-15T08:27:33,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor239.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:27:33,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:33,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:27:33,953 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:27:33,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-15T08:27:33,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
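Note: SnapshotManager(810) above proceeds only because no snapshot with the requested name already exists. A test that takes the same snapshot repeatedly (as a "consecutive exports" scenario might) could check for and remove a stale snapshot first; a minimal, assumed sketch follows, using only the snapshot and table names that appear in this log. It is not taken from the test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class SnapshotCleanupExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      for (SnapshotDescription sd : admin.listSnapshots()) {
        if (sd.getName().equals("snaptb0-testConsecutiveExports")) {
          // Removing a prior snapshot avoids a name clash when the test re-snapshots.
          admin.deleteSnapshot(sd.getName());
        }
      }
    }
  }
}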
2024-11-15T08:27:33,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=84, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-15T08:27:33,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 84 2024-11-15T08:27:33,955 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:27:33,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-15T08:27:33,956 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:27:33,958 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:27:33,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742010_1186 (size=156) 2024-11-15T08:27:33,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742010_1186 (size=156) 2024-11-15T08:27:33,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742010_1186 (size=156) 2024-11-15T08:27:33,968 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:27:33,968 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 80794e360336d739c4803cc7ed8cd919}, {pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 84fc98286288801ee1d5ff09c3365f53}] 2024-11-15T08:27:33,969 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 80794e360336d739c4803cc7ed8cd919 2024-11-15T08:27:33,969 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:27:34,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-15T08:27:34,120 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40637 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=85 2024-11-15T08:27:34,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=86 2024-11-15T08:27:34,121 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. 2024-11-15T08:27:34,121 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. 2024-11-15T08:27:34,121 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(2902): Flushing 80794e360336d739c4803cc7ed8cd919 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-15T08:27:34,121 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(2902): Flushing 84fc98286288801ee1d5ff09c3365f53 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-15T08:27:34,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919/.tmp/cf/b4d2ef6b3ae0421fb32f091fd07685ed is 71, key is 012dbbeed2c0dbefc22d458897723e22/cf:q/1731659253916/Put/seqid=0 2024-11-15T08:27:34,139 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53/.tmp/cf/3cfa26e821d643f8aa59f719241184a2 is 71, key is 1200debb5986bf06493a182eda6866bb/cf:q/1731659253918/Put/seqid=0 2024-11-15T08:27:34,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742012_1188 (size=8324) 2024-11-15T08:27:34,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742011_1187 (size=5288) 2024-11-15T08:27:34,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742011_1187 (size=5288) 2024-11-15T08:27:34,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742012_1188 (size=8324) 2024-11-15T08:27:34,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742011_1187 (size=5288) 2024-11-15T08:27:34,146 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.DefaultStoreFlusher(81): 
Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919/.tmp/cf/b4d2ef6b3ae0421fb32f091fd07685ed 2024-11-15T08:27:34,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742012_1188 (size=8324) 2024-11-15T08:27:34,147 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53/.tmp/cf/3cfa26e821d643f8aa59f719241184a2 2024-11-15T08:27:34,154 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53/.tmp/cf/3cfa26e821d643f8aa59f719241184a2 as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53/cf/3cfa26e821d643f8aa59f719241184a2 2024-11-15T08:27:34,154 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919/.tmp/cf/b4d2ef6b3ae0421fb32f091fd07685ed as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919/cf/b4d2ef6b3ae0421fb32f091fd07685ed 2024-11-15T08:27:34,159 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919/cf/b4d2ef6b3ae0421fb32f091fd07685ed, entries=3, sequenceid=6, filesize=5.2 K 2024-11-15T08:27:34,160 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53/cf/3cfa26e821d643f8aa59f719241184a2, entries=47, sequenceid=6, filesize=8.1 K 2024-11-15T08:27:34,160 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 80794e360336d739c4803cc7ed8cd919 in 39ms, sequenceid=6, compaction requested=false 2024-11-15T08:27:34,161 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-11-15T08:27:34,161 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] 
regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 84fc98286288801ee1d5ff09c3365f53 in 39ms, sequenceid=6, compaction requested=false 2024-11-15T08:27:34,161 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-11-15T08:27:34,161 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.HRegion(2603): Flush status journal for 84fc98286288801ee1d5ff09c3365f53: 2024-11-15T08:27:34,161 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.HRegion(2603): Flush status journal for 80794e360336d739c4803cc7ed8cd919: 2024-11-15T08:27:34,161 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. for snaptb0-testConsecutiveExports completed. 2024-11-15T08:27:34,161 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. for snaptb0-testConsecutiveExports completed. 2024-11-15T08:27:34,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-15T08:27:34,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:27:34,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-15T08:27:34,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53/cf/3cfa26e821d643f8aa59f719241184a2] hfiles 2024-11-15T08:27:34,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:27:34,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53/cf/3cfa26e821d643f8aa59f719241184a2 for snapshot=snaptb0-testConsecutiveExports 2024-11-15T08:27:34,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919/cf/b4d2ef6b3ae0421fb32f091fd07685ed] hfiles 2024-11-15T08:27:34,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919/cf/b4d2ef6b3ae0421fb32f091fd07685ed for snapshot=snaptb0-testConsecutiveExports 2024-11-15T08:27:34,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742013_1189 (size=107) 2024-11-15T08:27:34,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742013_1189 (size=107) 2024-11-15T08:27:34,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742014_1190 (size=107) 2024-11-15T08:27:34,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742013_1189 (size=107) 2024-11-15T08:27:34,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742014_1190 (size=107) 2024-11-15T08:27:34,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. 
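Note: the flush activity above (DefaultStoreFlusher, HRegionFileSystem commit, HStore add) is what a FLUSH-type snapshot drives on each region so the snapshot manifest can reference durable HFiles instead of memstore contents. The same flush can also be requested directly through the Admin API, independent of any snapshot; a minimal sketch under default configuration follows, shown only to connect the per-region log entries to a client-visible operation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ManualFlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the RegionServers hosting the table to flush memstores to HFiles,
      // the per-region equivalent of what SnapshotRegionCallable performs above.
      admin.flush(TableName.valueOf("testtb-testConsecutiveExports"));
    }
  }
}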
2024-11-15T08:27:34,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-15T08:27:34,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742014_1190 (size=107) 2024-11-15T08:27:34,176 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. 2024-11-15T08:27:34,176 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=85}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=85 2024-11-15T08:27:34,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=86 2024-11-15T08:27:34,176 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:27:34,176 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=86, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:27:34,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=85 2024-11-15T08:27:34,176 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 80794e360336d739c4803cc7ed8cd919 2024-11-15T08:27:34,176 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=84, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 80794e360336d739c4803cc7ed8cd919 2024-11-15T08:27:34,178 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 84fc98286288801ee1d5ff09c3365f53 in 209 msec 2024-11-15T08:27:34,179 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=85, resume processing ppid=84 2024-11-15T08:27:34,179 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=84, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 80794e360336d739c4803cc7ed8cd919 in 209 msec 2024-11-15T08:27:34,179 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:27:34,180 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:27:34,181 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports 
table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:27:34,181 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-11-15T08:27:34,181 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-15T08:27:34,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742015_1191 (size=621) 2024-11-15T08:27:34,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742015_1191 (size=621) 2024-11-15T08:27:34,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742015_1191 (size=621) 2024-11-15T08:27:34,195 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:27:34,200 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:27:34,200 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-15T08:27:34,201 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=84, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:27:34,201 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 84 2024-11-15T08:27:34,203 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=84, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 248 msec 2024-11-15T08:27:34,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=84 2024-11-15T08:27:34,276 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-15T08:27:34,277 INFO [Time-limited test {}] snapshot.TestExportSnapshot(475): Local export destination path: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276 2024-11-15T08:27:34,277 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276, srcFsUri=hdfs://localhost:44023, srcDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:27:34,314 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44023, inputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:27:34,314 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@491c065d, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-15T08:27:34,316 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-15T08:27:34,319 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-15T08:27:34,349 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:34,349 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:34,350 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:34,710 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-15T08:27:34,711 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-11-15T08:27:34,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the 
MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-15T08:27:35,401 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-6147704119716565071.jar 2024-11-15T08:27:35,401 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:35,401 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:35,485 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-18018278107390405929.jar 2024-11-15T08:27:35,486 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:35,486 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:35,486 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:35,487 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:35,487 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:35,487 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:35,487 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T08:27:35,487 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T08:27:35,488 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T08:27:35,488 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-15T08:27:35,488 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T08:27:35,488 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T08:27:35,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T08:27:35,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-15T08:27:35,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T08:27:35,489 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T08:27:35,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T08:27:35,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:27:35,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:27:35,490 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:27:35,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:27:35,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:27:35,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:27:35,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:27:35,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742016_1192 (size=131440) 2024-11-15T08:27:35,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742016_1192 (size=131440) 2024-11-15T08:27:35,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742016_1192 (size=131440) 2024-11-15T08:27:35,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742017_1193 (size=4188619) 2024-11-15T08:27:35,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742017_1193 (size=4188619) 2024-11-15T08:27:35,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742017_1193 (size=4188619) 2024-11-15T08:27:35,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742018_1194 (size=1323991) 2024-11-15T08:27:35,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742018_1194 (size=1323991) 2024-11-15T08:27:35,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742018_1194 (size=1323991) 2024-11-15T08:27:35,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742019_1195 (size=903738) 2024-11-15T08:27:35,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742019_1195 (size=903738) 2024-11-15T08:27:35,591 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742019_1195 (size=903738) 2024-11-15T08:27:35,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742020_1196 (size=8360083) 2024-11-15T08:27:35,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742020_1196 (size=8360083) 2024-11-15T08:27:35,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742020_1196 (size=8360083) 2024-11-15T08:27:35,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742021_1197 (size=1877034) 2024-11-15T08:27:35,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742021_1197 (size=1877034) 2024-11-15T08:27:35,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742021_1197 (size=1877034) 2024-11-15T08:27:35,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742022_1198 (size=6424751) 2024-11-15T08:27:35,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742022_1198 (size=6424751) 2024-11-15T08:27:35,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742022_1198 (size=6424751) 2024-11-15T08:27:35,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742023_1199 (size=77835) 2024-11-15T08:27:35,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742023_1199 (size=77835) 2024-11-15T08:27:35,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742023_1199 (size=77835) 2024-11-15T08:27:35,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742024_1200 (size=30949) 2024-11-15T08:27:35,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742024_1200 (size=30949) 2024-11-15T08:27:35,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742024_1200 (size=30949) 2024-11-15T08:27:35,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742025_1201 (size=1597327) 2024-11-15T08:27:35,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742025_1201 (size=1597327) 2024-11-15T08:27:35,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742025_1201 (size=1597327) 2024-11-15T08:27:35,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742026_1202 (size=4695811) 2024-11-15T08:27:35,727 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742026_1202 (size=4695811) 2024-11-15T08:27:35,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742026_1202 (size=4695811) 2024-11-15T08:27:35,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742027_1203 (size=232957) 2024-11-15T08:27:35,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742027_1203 (size=232957) 2024-11-15T08:27:35,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742027_1203 (size=232957) 2024-11-15T08:27:35,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742028_1204 (size=127628) 2024-11-15T08:27:35,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742028_1204 (size=127628) 2024-11-15T08:27:35,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742028_1204 (size=127628) 2024-11-15T08:27:35,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742029_1205 (size=20406) 2024-11-15T08:27:35,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742029_1205 (size=20406) 2024-11-15T08:27:35,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742029_1205 (size=20406) 2024-11-15T08:27:35,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742030_1206 (size=5175431) 2024-11-15T08:27:35,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742030_1206 (size=5175431) 2024-11-15T08:27:35,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742030_1206 (size=5175431) 2024-11-15T08:27:35,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742031_1207 (size=217634) 2024-11-15T08:27:35,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742031_1207 (size=217634) 2024-11-15T08:27:35,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742031_1207 (size=217634) 2024-11-15T08:27:35,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742032_1208 (size=1832290) 2024-11-15T08:27:35,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742032_1208 (size=1832290) 2024-11-15T08:27:35,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742032_1208 (size=1832290) 2024-11-15T08:27:35,816 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742033_1209 (size=322274) 2024-11-15T08:27:35,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742033_1209 (size=322274) 2024-11-15T08:27:35,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742033_1209 (size=322274) 2024-11-15T08:27:35,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742034_1210 (size=503880) 2024-11-15T08:27:35,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742034_1210 (size=503880) 2024-11-15T08:27:35,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742034_1210 (size=503880) 2024-11-15T08:27:35,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742035_1211 (size=440656) 2024-11-15T08:27:35,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742035_1211 (size=440656) 2024-11-15T08:27:35,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742035_1211 (size=440656) 2024-11-15T08:27:35,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742036_1212 (size=29229) 2024-11-15T08:27:35,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742036_1212 (size=29229) 2024-11-15T08:27:35,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742036_1212 (size=29229) 2024-11-15T08:27:35,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742037_1213 (size=24096) 2024-11-15T08:27:35,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742037_1213 (size=24096) 2024-11-15T08:27:35,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742037_1213 (size=24096) 2024-11-15T08:27:35,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742038_1214 (size=111872) 2024-11-15T08:27:35,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742038_1214 (size=111872) 2024-11-15T08:27:35,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742038_1214 (size=111872) 2024-11-15T08:27:36,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742039_1215 (size=45609) 2024-11-15T08:27:36,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742039_1215 (size=45609) 2024-11-15T08:27:36,278 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742039_1215 (size=45609) 2024-11-15T08:27:36,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742040_1216 (size=136454) 2024-11-15T08:27:36,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742040_1216 (size=136454) 2024-11-15T08:27:36,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742040_1216 (size=136454) 2024-11-15T08:27:36,288 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-15T08:27:36,290 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-15T08:27:36,292 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-15T08:27:36,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742041_1217 (size=338) 2024-11-15T08:27:36,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742041_1217 (size=338) 2024-11-15T08:27:36,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742041_1217 (size=338) 2024-11-15T08:27:36,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742042_1218 (size=15) 2024-11-15T08:27:36,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742042_1218 (size=15) 2024-11-15T08:27:36,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742042_1218 (size=15) 2024-11-15T08:27:36,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742043_1219 (size=303779) 2024-11-15T08:27:36,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742043_1219 (size=303779) 2024-11-15T08:27:36,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742043_1219 (size=303779) 2024-11-15T08:27:36,723 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T08:27:36,723 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T08:27:36,729 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0003_000001 (auth:SIMPLE) from 127.0.0.1:48286 2024-11-15T08:27:36,743 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0003/container_1731659181416_0003_01_000001/launch_container.sh] 2024-11-15T08:27:36,743 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0003/container_1731659181416_0003_01_000001/container_tokens] 2024-11-15T08:27:36,743 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0003/container_1731659181416_0003_01_000001/sysfs] 2024-11-15T08:27:37,138 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0004_000001 (auth:SIMPLE) from 127.0.0.1:38530 2024-11-15T08:27:38,129 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T08:27:43,186 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0004_000001 (auth:SIMPLE) from 127.0.0.1:44358 2024-11-15T08:27:43,476 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
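[Editor's note] The entries above show the MiniMRCluster accepting application attempts for the ExportSnapshot job that copies 'snaptb0-testConsecutiveExports'. As a hedged illustration only (none of this code appears in the log; the target URI, mapper count, and class name are assumptions, and option spellings can differ between HBase versions, so check --help), a driver could launch the same tool programmatically:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Runs the same MapReduce-backed export the test performs; the snapshot name
        // comes from the log, the copy target here is a hypothetical local directory.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "--snapshot", "snaptb0-testConsecutiveExports",
            "--copy-to", "file:///tmp/local-export",
            "--mappers", "1"
        });
        System.exit(rc);
      }
    }
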
2024-11-15T08:27:43,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742044_1220 (size=349429) 2024-11-15T08:27:43,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742044_1220 (size=349429) 2024-11-15T08:27:43,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742044_1220 (size=349429) 2024-11-15T08:27:45,449 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0004_000001 (auth:SIMPLE) from 127.0.0.1:45982 2024-11-15T08:27:50,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742045_1221 (size=17447) 2024-11-15T08:27:50,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742045_1221 (size=17447) 2024-11-15T08:27:50,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742045_1221 (size=17447) 2024-11-15T08:27:50,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742046_1222 (size=462) 2024-11-15T08:27:50,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742046_1222 (size=462) 2024-11-15T08:27:50,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742046_1222 (size=462) 2024-11-15T08:27:50,106 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0004/container_1731659181416_0004_01_000002/launch_container.sh] 2024-11-15T08:27:50,107 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0004/container_1731659181416_0004_01_000002/container_tokens] 2024-11-15T08:27:50,107 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0004/container_1731659181416_0004_01_000002/sysfs] 2024-11-15T08:27:50,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742047_1223 (size=17447) 2024-11-15T08:27:50,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742047_1223 (size=17447) 2024-11-15T08:27:50,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742047_1223 
(size=17447) 2024-11-15T08:27:50,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742048_1224 (size=349429) 2024-11-15T08:27:50,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742048_1224 (size=349429) 2024-11-15T08:27:50,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742048_1224 (size=349429) 2024-11-15T08:27:50,153 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0004_000001 (auth:SIMPLE) from 127.0.0.1:45992 2024-11-15T08:27:51,563 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-15T08:27:51,563 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-15T08:27:51,566 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testConsecutiveExports 2024-11-15T08:27:51,566 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-15T08:27:51,567 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-15T08:27:51,567 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-15T08:27:51,568 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-15T08:27:51,568 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-15T08:27:51,568 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in org.apache.hadoop.fs.LocalFileSystem@491c065d in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-15T08:27:51,568 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-15T08:27:51,568 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-15T08:27:51,570 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): 
tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276, srcFsUri=hdfs://localhost:44023, srcDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:27:51,605 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44023, inputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:27:51,605 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@491c065d, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-15T08:27:51,607 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-15T08:27:51,612 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-15T08:27:51,630 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:51,630 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:51,631 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:52,676 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-3523954785700105063.jar 2024-11-15T08:27:52,676 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:52,677 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:52,760 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-7695562707000743649.jar 2024-11-15T08:27:52,761 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:52,761 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:52,761 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:52,762 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:52,762 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:52,762 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:27:52,763 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T08:27:52,763 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T08:27:52,764 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T08:27:52,764 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-15T08:27:52,764 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T08:27:52,765 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T08:27:52,765 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T08:27:52,765 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-15T08:27:52,766 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T08:27:52,766 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T08:27:52,766 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T08:27:52,767 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:27:52,767 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:27:52,767 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:27:52,768 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:27:52,768 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:27:52,769 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:27:52,769 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:27:52,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742049_1225 (size=131440) 2024-11-15T08:27:52,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742049_1225 (size=131440) 2024-11-15T08:27:52,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742049_1225 (size=131440) 2024-11-15T08:27:52,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742050_1226 (size=4188619) 2024-11-15T08:27:52,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742050_1226 (size=4188619) 2024-11-15T08:27:52,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742050_1226 (size=4188619) 2024-11-15T08:27:52,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742051_1227 (size=1323991) 2024-11-15T08:27:52,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742051_1227 (size=1323991) 2024-11-15T08:27:52,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742051_1227 (size=1323991) 2024-11-15T08:27:52,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742052_1228 (size=6424751) 2024-11-15T08:27:52,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742052_1228 (size=6424751) 2024-11-15T08:27:52,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742052_1228 (size=6424751) 2024-11-15T08:27:52,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742053_1229 (size=903738) 2024-11-15T08:27:52,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742053_1229 (size=903738) 2024-11-15T08:27:52,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742053_1229 (size=903738) 2024-11-15T08:27:52,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742054_1230 (size=8360083) 2024-11-15T08:27:52,938 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742054_1230 (size=8360083) 2024-11-15T08:27:52,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742054_1230 (size=8360083) 2024-11-15T08:27:52,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742055_1231 (size=440656) 2024-11-15T08:27:52,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742055_1231 (size=440656) 2024-11-15T08:27:52,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742055_1231 (size=440656) 2024-11-15T08:27:52,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742056_1232 (size=1877034) 2024-11-15T08:27:52,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742056_1232 (size=1877034) 2024-11-15T08:27:52,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742056_1232 (size=1877034) 2024-11-15T08:27:52,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742057_1233 (size=77835) 2024-11-15T08:27:52,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742057_1233 (size=77835) 2024-11-15T08:27:52,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742057_1233 (size=77835) 2024-11-15T08:27:52,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742058_1234 (size=30949) 2024-11-15T08:27:52,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742058_1234 (size=30949) 2024-11-15T08:27:52,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742058_1234 (size=30949) 2024-11-15T08:27:53,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742059_1235 (size=1597327) 2024-11-15T08:27:53,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742059_1235 (size=1597327) 2024-11-15T08:27:53,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742059_1235 (size=1597327) 2024-11-15T08:27:53,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742060_1236 (size=4695811) 2024-11-15T08:27:53,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742060_1236 (size=4695811) 2024-11-15T08:27:53,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742060_1236 (size=4695811) 2024-11-15T08:27:53,061 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742061_1237 (size=232957) 2024-11-15T08:27:53,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742061_1237 (size=232957) 2024-11-15T08:27:53,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742061_1237 (size=232957) 2024-11-15T08:27:53,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742062_1238 (size=127628) 2024-11-15T08:27:53,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742062_1238 (size=127628) 2024-11-15T08:27:53,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742062_1238 (size=127628) 2024-11-15T08:27:53,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742063_1239 (size=20406) 2024-11-15T08:27:53,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742063_1239 (size=20406) 2024-11-15T08:27:53,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742063_1239 (size=20406) 2024-11-15T08:27:53,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742064_1240 (size=5175431) 2024-11-15T08:27:53,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742064_1240 (size=5175431) 2024-11-15T08:27:53,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742064_1240 (size=5175431) 2024-11-15T08:27:53,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742065_1241 (size=217634) 2024-11-15T08:27:53,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742065_1241 (size=217634) 2024-11-15T08:27:53,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742065_1241 (size=217634) 2024-11-15T08:27:53,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742066_1242 (size=1832290) 2024-11-15T08:27:53,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742066_1242 (size=1832290) 2024-11-15T08:27:53,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742066_1242 (size=1832290) 2024-11-15T08:27:53,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742067_1243 (size=322274) 2024-11-15T08:27:53,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742067_1243 (size=322274) 2024-11-15T08:27:53,128 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742067_1243 (size=322274) 2024-11-15T08:27:53,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742068_1244 (size=503880) 2024-11-15T08:27:53,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742068_1244 (size=503880) 2024-11-15T08:27:53,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742068_1244 (size=503880) 2024-11-15T08:27:53,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742069_1245 (size=29229) 2024-11-15T08:27:53,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742069_1245 (size=29229) 2024-11-15T08:27:53,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742069_1245 (size=29229) 2024-11-15T08:27:53,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742070_1246 (size=24096) 2024-11-15T08:27:53,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742070_1246 (size=24096) 2024-11-15T08:27:53,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742070_1246 (size=24096) 2024-11-15T08:27:53,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742071_1247 (size=111872) 2024-11-15T08:27:53,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742071_1247 (size=111872) 2024-11-15T08:27:53,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742071_1247 (size=111872) 2024-11-15T08:27:53,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742072_1248 (size=45609) 2024-11-15T08:27:53,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742072_1248 (size=45609) 2024-11-15T08:27:53,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742072_1248 (size=45609) 2024-11-15T08:27:53,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742073_1249 (size=136454) 2024-11-15T08:27:53,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742073_1249 (size=136454) 2024-11-15T08:27:53,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742073_1249 (size=136454) 2024-11-15T08:27:53,179 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
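[Editor's note] The JobResourceUploader warning at the end of the entry above fires because the test submits its MapReduce job without a job jar, while the long run of TableMapReduceUtil "For class X, using jar Y" entries shows the dependency jars it ships through the distributed cache instead. A minimal, hedged sketch of how an ordinary driver would handle both (the class and job names here are hypothetical, not from the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class JobJarExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-example");
        // Setting the job jar from a user class avoids the
        // "No job jar file set. User classes may not be found." warning.
        job.setJarByClass(JobJarExample.class);
        // Ships HBase and its transitive dependencies with the job; resolving each
        // class to its containing jar is what produces the listing in this log.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }
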
2024-11-15T08:27:53,181 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-15T08:27:53,183 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-15T08:27:53,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742074_1250 (size=338) 2024-11-15T08:27:53,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742074_1250 (size=338) 2024-11-15T08:27:53,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742074_1250 (size=338) 2024-11-15T08:27:53,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742075_1251 (size=15) 2024-11-15T08:27:53,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742075_1251 (size=15) 2024-11-15T08:27:53,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742075_1251 (size=15) 2024-11-15T08:27:53,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742076_1252 (size=303777) 2024-11-15T08:27:53,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742076_1252 (size=303777) 2024-11-15T08:27:53,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742076_1252 (size=303777) 2024-11-15T08:27:56,256 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T08:27:56,257 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T08:27:56,264 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0004_000001 (auth:SIMPLE) from 127.0.0.1:40732 2024-11-15T08:27:56,280 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_1/usercache/jenkins/appcache/application_1731659181416_0004/container_1731659181416_0004_01_000001/launch_container.sh] 2024-11-15T08:27:56,280 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_1/usercache/jenkins/appcache/application_1731659181416_0004/container_1731659181416_0004_01_000001/container_tokens] 2024-11-15T08:27:56,280 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_1/usercache/jenkins/appcache/application_1731659181416_0004/container_1731659181416_0004_01_000001/sysfs] 2024-11-15T08:27:57,095 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0005_000001 (auth:SIMPLE) from 127.0.0.1:47240 2024-11-15T08:28:03,239 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0005_000001 (auth:SIMPLE) from 127.0.0.1:54434 2024-11-15T08:28:03,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742077_1253 (size=349427) 2024-11-15T08:28:03,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742077_1253 (size=349427) 2024-11-15T08:28:03,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742077_1253 (size=349427) 2024-11-15T08:28:05,486 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0005_000001 (auth:SIMPLE) from 127.0.0.1:55254 2024-11-15T08:28:08,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742078_1254 (size=16924) 2024-11-15T08:28:08,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742078_1254 (size=16924) 2024-11-15T08:28:08,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742078_1254 (size=16924) 2024-11-15T08:28:08,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742079_1255 (size=462) 2024-11-15T08:28:08,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742079_1255 (size=462) 2024-11-15T08:28:08,866 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742079_1255 (size=462) 2024-11-15T08:28:08,909 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0005/container_1731659181416_0005_01_000002/launch_container.sh] 2024-11-15T08:28:08,909 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0005/container_1731659181416_0005_01_000002/container_tokens] 2024-11-15T08:28:08,909 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0005/container_1731659181416_0005_01_000002/sysfs] 2024-11-15T08:28:08,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742080_1256 (size=16924) 2024-11-15T08:28:08,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742080_1256 (size=16924) 2024-11-15T08:28:08,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742080_1256 (size=16924) 2024-11-15T08:28:08,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742081_1257 (size=349427) 2024-11-15T08:28:08,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742081_1257 (size=349427) 2024-11-15T08:28:08,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742081_1257 (size=349427) 2024-11-15T08:28:08,971 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0005_000001 (auth:SIMPLE) from 127.0.0.1:55270 2024-11-15T08:28:10,602 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-15T08:28:10,603 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
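[Editor's note] Once the copy job finishes, ExportSnapshot finalizes the export and TestExportSnapshot lists the resulting layout, which contains a .snapshotinfo and a data.manifest under .hbase-snapshot/<snapshot>. A hedged sketch of the same existence check against an assumed local target directory (the path is illustrative; the test uses its own local-export-<timestamp> directory):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class VerifyExportedSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical export target; substitute the real --copy-to location.
        Path exported = new Path(
            "file:///tmp/local-export/.hbase-snapshot/snaptb0-testConsecutiveExports");
        FileSystem fs = FileSystem.get(exported.toUri(), conf);
        // The two files TestExportSnapshot lists after each export.
        boolean ok = fs.exists(new Path(exported, ".snapshotinfo"))
                  && fs.exists(new Path(exported, "data.manifest"));
        System.out.println("exported snapshot layout present: " + ok);
      }
    }
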
2024-11-15T08:28:10,605 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testConsecutiveExports 2024-11-15T08:28:10,606 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-15T08:28:10,606 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-15T08:28:10,606 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-15T08:28:10,608 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-15T08:28:10,608 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-15T08:28:10,608 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in org.apache.hadoop.fs.LocalFileSystem@491c065d in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-15T08:28:10,608 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-15T08:28:10,608 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659254276/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-15T08:28:10,624 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-11-15T08:28:10,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=87, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-11-15T08:28:10,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-15T08:28:10,628 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659290628"}]},"ts":"1731659290628"} 2024-11-15T08:28:10,630 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-11-15T08:28:10,630 INFO [PEWorker-1 {}] 
procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-11-15T08:28:10,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-11-15T08:28:10,632 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=80794e360336d739c4803cc7ed8cd919, UNASSIGN}, {pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=84fc98286288801ee1d5ff09c3365f53, UNASSIGN}] 2024-11-15T08:28:10,633 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=84fc98286288801ee1d5ff09c3365f53, UNASSIGN 2024-11-15T08:28:10,634 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=80794e360336d739c4803cc7ed8cd919, UNASSIGN 2024-11-15T08:28:10,635 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=90 updating hbase:meta row=84fc98286288801ee1d5ff09c3365f53, regionState=CLOSING, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:28:10,635 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=89 updating hbase:meta row=80794e360336d739c4803cc7ed8cd919, regionState=CLOSING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:28:10,637 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=80794e360336d739c4803cc7ed8cd919, UNASSIGN because future has completed 2024-11-15T08:28:10,637 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:28:10,637 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=89, state=RUNNABLE, hasLock=false; CloseRegionProcedure 80794e360336d739c4803cc7ed8cd919, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:28:10,637 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=90, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=84fc98286288801ee1d5ff09c3365f53, UNASSIGN because future has completed 2024-11-15T08:28:10,638 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:28:10,638 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=92, ppid=90, state=RUNNABLE, hasLock=false; CloseRegionProcedure 84fc98286288801ee1d5ff09c3365f53, server=48e8ad8dc936,37511,1731659175104}] 2024-11-15T08:28:10,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking 
to see if procedure is done pid=87 2024-11-15T08:28:10,790 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(122): Close 80794e360336d739c4803cc7ed8cd919 2024-11-15T08:28:10,790 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(122): Close 84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:28:10,790 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:28:10,790 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:28:10,790 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1722): Closing 80794e360336d739c4803cc7ed8cd919, disabling compactions & flushes 2024-11-15T08:28:10,790 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1722): Closing 84fc98286288801ee1d5ff09c3365f53, disabling compactions & flushes 2024-11-15T08:28:10,790 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. 2024-11-15T08:28:10,790 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. 2024-11-15T08:28:10,790 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. 2024-11-15T08:28:10,790 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. 2024-11-15T08:28:10,790 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. after waiting 0 ms 2024-11-15T08:28:10,790 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. after waiting 0 ms 2024-11-15T08:28:10,790 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. 2024-11-15T08:28:10,791 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. 
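The DisableTableProcedure chain recorded above (pid=87 with its CloseTableRegionsProcedure, TransitRegionStateProcedure and CloseRegionProcedure children, pids 88-92) is what the master runs in response to a single Admin.disableTable() call from the client. A minimal client-side sketch, assuming an HBase 2.x client and a reachable cluster; the class name and wiring are illustrative and do not come from the test source:

// Sketch only: disabling a table through the HBase Admin API. The master
// expands this one call into the DisableTableProcedure and the region
// CLOSE sub-procedures visible in the surrounding log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();              // picks up hbase-site.xml
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);   // blocks until the master procedure reports SUCCESS
      }
    }
  }
}

The repeated "Checking to see if procedure is done pid=87" entries are the client side of that blocking call polling the master until the procedure completes.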
2024-11-15T08:28:10,796 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T08:28:10,796 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T08:28:10,796 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:28:10,796 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:28:10,796 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919. 2024-11-15T08:28:10,796 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] regionserver.HRegion(1676): Region close journal for 80794e360336d739c4803cc7ed8cd919: Waiting for close lock at 1731659290790Running coprocessor pre-close hooks at 1731659290790Disabling compacts and flushes for region at 1731659290790Disabling writes for close at 1731659290790Writing region close event to WAL at 1731659290791 (+1 ms)Running coprocessor post-close hooks at 1731659290796 (+5 ms)Closed at 1731659290796 2024-11-15T08:28:10,796 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53. 
2024-11-15T08:28:10,796 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1676): Region close journal for 84fc98286288801ee1d5ff09c3365f53: Waiting for close lock at 1731659290790Running coprocessor pre-close hooks at 1731659290790Disabling compacts and flushes for region at 1731659290790Disabling writes for close at 1731659290790Writing region close event to WAL at 1731659290791 (+1 ms)Running coprocessor post-close hooks at 1731659290796 (+5 ms)Closed at 1731659290796 2024-11-15T08:28:10,798 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=91}] handler.UnassignRegionHandler(157): Closed 80794e360336d739c4803cc7ed8cd919 2024-11-15T08:28:10,799 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=89 updating hbase:meta row=80794e360336d739c4803cc7ed8cd919, regionState=CLOSED 2024-11-15T08:28:10,800 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(157): Closed 84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:28:10,800 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=90 updating hbase:meta row=84fc98286288801ee1d5ff09c3365f53, regionState=CLOSED 2024-11-15T08:28:10,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=89, state=RUNNABLE, hasLock=false; CloseRegionProcedure 80794e360336d739c4803cc7ed8cd919, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:28:10,803 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=90, state=RUNNABLE, hasLock=false; CloseRegionProcedure 84fc98286288801ee1d5ff09c3365f53, server=48e8ad8dc936,37511,1731659175104 because future has completed 2024-11-15T08:28:10,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=91, resume processing ppid=89 2024-11-15T08:28:10,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=89, state=SUCCESS, hasLock=false; CloseRegionProcedure 80794e360336d739c4803cc7ed8cd919, server=48e8ad8dc936,40637,1731659175269 in 166 msec 2024-11-15T08:28:10,806 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=92, resume processing ppid=90 2024-11-15T08:28:10,806 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; CloseRegionProcedure 84fc98286288801ee1d5ff09c3365f53, server=48e8ad8dc936,37511,1731659175104 in 166 msec 2024-11-15T08:28:10,807 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, ppid=88, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=80794e360336d739c4803cc7ed8cd919, UNASSIGN in 174 msec 2024-11-15T08:28:10,809 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=88 2024-11-15T08:28:10,809 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=88, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=84fc98286288801ee1d5ff09c3365f53, UNASSIGN in 175 msec 2024-11-15T08:28:10,812 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=88, resume processing ppid=87 2024-11-15T08:28:10,812 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, 
ppid=87, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 180 msec 2024-11-15T08:28:10,813 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659290813"}]},"ts":"1731659290813"} 2024-11-15T08:28:10,815 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-11-15T08:28:10,815 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-11-15T08:28:10,817 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 191 msec 2024-11-15T08:28:10,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=87 2024-11-15T08:28:10,946 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-15T08:28:10,947 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-11-15T08:28:10,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-15T08:28:10,949 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-15T08:28:10,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-11-15T08:28:10,950 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=93, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-15T08:28:10,952 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-11-15T08:28:10,954 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919 2024-11-15T08:28:10,954 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:28:10,956 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53/recovered.edits] 2024-11-15T08:28:10,956 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): 
Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919/recovered.edits] 2024-11-15T08:28:10,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-15T08:28:10,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-15T08:28:10,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-15T08:28:10,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-15T08:28:10,957 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-15T08:28:10,957 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-15T08:28:10,957 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-15T08:28:10,957 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-15T08:28:10,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-15T08:28:10,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-15T08:28:10,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:10,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-15T08:28:10,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/acl 2024-11-15T08:28:10,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:10,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-15T08:28:10,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:10,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=93 2024-11-15T08:28:10,961 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53/cf/3cfa26e821d643f8aa59f719241184a2 to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53/cf/3cfa26e821d643f8aa59f719241184a2 2024-11-15T08:28:10,961 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919/cf/b4d2ef6b3ae0421fb32f091fd07685ed to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919/cf/b4d2ef6b3ae0421fb32f091fd07685ed 2024-11-15T08:28:10,964 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919/recovered.edits/9.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919/recovered.edits/9.seqid 2024-11-15T08:28:10,964 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53/recovered.edits/9.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53/recovered.edits/9.seqid 2024-11-15T08:28:10,964 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/80794e360336d739c4803cc7ed8cd919 2024-11-15T08:28:10,964 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testConsecutiveExports/84fc98286288801ee1d5ff09c3365f53 2024-11-15T08:28:10,965 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived 
testtb-testConsecutiveExports regions 2024-11-15T08:28:10,966 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=93, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-15T08:28:10,969 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-11-15T08:28:10,971 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-11-15T08:28:10,972 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=93, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-15T08:28:10,972 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 2024-11-15T08:28:10,973 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659290972"}]},"ts":"9223372036854775807"} 2024-11-15T08:28:10,973 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659290972"}]},"ts":"9223372036854775807"} 2024-11-15T08:28:10,974 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T08:28:10,974 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 80794e360336d739c4803cc7ed8cd919, NAME => 'testtb-testConsecutiveExports,,1731659252923.80794e360336d739c4803cc7ed8cd919.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 84fc98286288801ee1d5ff09c3365f53, NAME => 'testtb-testConsecutiveExports,1,1731659252923.84fc98286288801ee1d5ff09c3365f53.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T08:28:10,974 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 
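Once the region directories above have been archived, the DeleteTableProcedure removes the table's rows and descriptor from hbase:meta, and the master then serves the snapshot delete requests that follow a little further below. Both are driven by ordinary Admin calls; a hedged sketch under the same assumptions as the previous snippet (illustrative wiring, not the test's own helper code):

// Sketch: dropping the disabled table and cleaning up its snapshots,
// matching the DeleteTableProcedure and SnapshotManager entries nearby.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      admin.deleteTable(table);                                   // table must already be disabled
      // Corresponds to the "Deleting snapshot" log entries below.
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}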
2024-11-15T08:28:10,975 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731659290974"}]},"ts":"9223372036854775807"} 2024-11-15T08:28:10,976 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-11-15T08:28:10,977 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=93, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-15T08:28:10,978 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 30 msec 2024-11-15T08:28:11,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=93 2024-11-15T08:28:11,067 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-11-15T08:28:11,067 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-15T08:28:11,074 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-11-15T08:28:11,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-11-15T08:28:11,078 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-11-15T08:28:11,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-11-15T08:28:11,100 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=788 (was 786) Potentially hanging thread: Thread-3713 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42319 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2123625332_22 at /127.0.0.1:39262 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2123625332_22 at /127.0.0.1:56092 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:42319 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_921335341_1 at /127.0.0.1:42728 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45533 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 15959) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2123625332_22 at /127.0.0.1:55582 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_921335341_1 at /127.0.0.1:33240 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=801 (was 801), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=430 (was 402) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 19), AvailableMemoryMB=759 (was 1099) 2024-11-15T08:28:11,100 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=788 is superior to 500 2024-11-15T08:28:11,117 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=788, OpenFileDescriptor=801, MaxFileDescriptor=1048576, SystemLoadAverage=430, ProcessCount=19, AvailableMemoryMB=758 2024-11-15T08:28:11,117 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=788 is superior to 500 2024-11-15T08:28:11,119 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T08:28:11,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:11,121 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T08:28:11,121 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:11,121 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 94 2024-11-15T08:28:11,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-15T08:28:11,122 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T08:28:11,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742082_1258 (size=422) 2024-11-15T08:28:11,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742082_1258 (size=422) 2024-11-15T08:28:11,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742082_1258 (size=422) 2024-11-15T08:28:11,130 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating 
{ENCODED => cf05b1d7d16bcb282753fe29aabe0370, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:11,131 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => a69cd0742c8d38c1cc58de125fa4e238, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:11,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742083_1259 (size=83) 2024-11-15T08:28:11,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742083_1259 (size=83) 2024-11-15T08:28:11,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742084_1260 (size=83) 2024-11-15T08:28:11,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742083_1259 (size=83) 2024-11-15T08:28:11,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742084_1260 (size=83) 2024-11-15T08:28:11,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742084_1260 (size=83) 2024-11-15T08:28:11,138 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:11,139 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing a69cd0742c8d38c1cc58de125fa4e238, disabling compactions & flushes 2024-11-15T08:28:11,139 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region 
testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. 2024-11-15T08:28:11,139 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. 2024-11-15T08:28:11,139 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. after waiting 0 ms 2024-11-15T08:28:11,139 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. 2024-11-15T08:28:11,139 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:11,139 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. 2024-11-15T08:28:11,139 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing cf05b1d7d16bcb282753fe29aabe0370, disabling compactions & flushes 2024-11-15T08:28:11,139 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. 2024-11-15T08:28:11,139 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. 2024-11-15T08:28:11,139 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for a69cd0742c8d38c1cc58de125fa4e238: Waiting for close lock at 1731659291139Disabling compacts and flushes for region at 1731659291139Disabling writes for close at 1731659291139Writing region close event to WAL at 1731659291139Closed at 1731659291139 2024-11-15T08:28:11,139 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. after waiting 0 ms 2024-11-15T08:28:11,139 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. 2024-11-15T08:28:11,139 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. 
2024-11-15T08:28:11,139 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for cf05b1d7d16bcb282753fe29aabe0370: Waiting for close lock at 1731659291139Disabling compacts and flushes for region at 1731659291139Disabling writes for close at 1731659291139Writing region close event to WAL at 1731659291139Closed at 1731659291139 2024-11-15T08:28:11,140 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T08:28:11,141 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1731659291140"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659291140"}]},"ts":"1731659291140"} 2024-11-15T08:28:11,141 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1731659291140"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659291140"}]},"ts":"1731659291140"} 2024-11-15T08:28:11,143 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T08:28:11,144 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T08:28:11,144 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659291144"}]},"ts":"1731659291144"} 2024-11-15T08:28:11,146 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-11-15T08:28:11,146 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {48e8ad8dc936=0} racks are {/default-rack=0} 2024-11-15T08:28:11,148 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T08:28:11,148 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T08:28:11,148 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T08:28:11,148 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T08:28:11,148 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T08:28:11,148 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T08:28:11,148 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T08:28:11,148 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T08:28:11,148 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T08:28:11,148 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T08:28:11,148 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cf05b1d7d16bcb282753fe29aabe0370, ASSIGN}, {pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a69cd0742c8d38c1cc58de125fa4e238, ASSIGN}] 2024-11-15T08:28:11,149 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a69cd0742c8d38c1cc58de125fa4e238, ASSIGN 2024-11-15T08:28:11,149 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cf05b1d7d16bcb282753fe29aabe0370, ASSIGN 2024-11-15T08:28:11,150 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a69cd0742c8d38c1cc58de125fa4e238, ASSIGN; state=OFFLINE, location=48e8ad8dc936,40637,1731659175269; forceNewPlan=false, retain=false 2024-11-15T08:28:11,150 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cf05b1d7d16bcb282753fe29aabe0370, ASSIGN; state=OFFLINE, location=48e8ad8dc936,37511,1731659175104; forceNewPlan=false, retain=false 2024-11-15T08:28:11,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-15T08:28:11,300 INFO [48e8ad8dc936:35169 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-15T08:28:11,301 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=95 updating hbase:meta row=cf05b1d7d16bcb282753fe29aabe0370, regionState=OPENING, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:28:11,301 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=96 updating hbase:meta row=a69cd0742c8d38c1cc58de125fa4e238, regionState=OPENING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:28:11,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cf05b1d7d16bcb282753fe29aabe0370, ASSIGN because future has completed 2024-11-15T08:28:11,303 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=95, state=RUNNABLE, hasLock=false; OpenRegionProcedure cf05b1d7d16bcb282753fe29aabe0370, server=48e8ad8dc936,37511,1731659175104}] 2024-11-15T08:28:11,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=96, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a69cd0742c8d38c1cc58de125fa4e238, ASSIGN because future has completed 2024-11-15T08:28:11,304 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=98, ppid=96, state=RUNNABLE, hasLock=false; OpenRegionProcedure a69cd0742c8d38c1cc58de125fa4e238, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:28:11,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-15T08:28:11,458 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. 2024-11-15T08:28:11,459 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7752): Opening region: {ENCODED => cf05b1d7d16bcb282753fe29aabe0370, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T08:28:11,459 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. service=AccessControlService 2024-11-15T08:28:11,459 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T08:28:11,459 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:11,459 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:11,459 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7794): checking encryption for cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:11,459 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(7797): checking classloading for cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:11,459 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. 2024-11-15T08:28:11,460 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7752): Opening region: {ENCODED => a69cd0742c8d38c1cc58de125fa4e238, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T08:28:11,460 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. service=AccessControlService 2024-11-15T08:28:11,460 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T08:28:11,460 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:11,460 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:11,460 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7794): checking encryption for a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:11,460 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7797): checking classloading for a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:11,461 INFO [StoreOpener-cf05b1d7d16bcb282753fe29aabe0370-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:11,461 INFO [StoreOpener-a69cd0742c8d38c1cc58de125fa4e238-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:11,462 INFO [StoreOpener-cf05b1d7d16bcb282753fe29aabe0370-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cf05b1d7d16bcb282753fe29aabe0370 columnFamilyName cf 2024-11-15T08:28:11,463 INFO [StoreOpener-a69cd0742c8d38c1cc58de125fa4e238-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a69cd0742c8d38c1cc58de125fa4e238 columnFamilyName cf 2024-11-15T08:28:11,463 DEBUG [StoreOpener-cf05b1d7d16bcb282753fe29aabe0370-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:11,463 DEBUG [StoreOpener-a69cd0742c8d38c1cc58de125fa4e238-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:11,463 INFO [StoreOpener-a69cd0742c8d38c1cc58de125fa4e238-1 {}] regionserver.HStore(327): Store=a69cd0742c8d38c1cc58de125fa4e238/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:28:11,463 INFO [StoreOpener-cf05b1d7d16bcb282753fe29aabe0370-1 {}] regionserver.HStore(327): Store=cf05b1d7d16bcb282753fe29aabe0370/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:28:11,463 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1038): replaying wal for cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:11,463 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1038): replaying wal for a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:11,464 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:11,464 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:11,464 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:11,464 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:11,465 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1048): stopping wal replay for a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:11,465 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1048): stopping wal replay for cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:11,465 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1060): Cleaning up temporary data for a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:11,465 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1060): Cleaning up temporary data for cf05b1d7d16bcb282753fe29aabe0370 
2024-11-15T08:28:11,466 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1093): writing seq id for cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:11,466 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1093): writing seq id for a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:11,468 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:28:11,469 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:28:11,469 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1114): Opened cf05b1d7d16bcb282753fe29aabe0370; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66629262, jitterRate=-0.007146626710891724}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:28:11,469 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:11,469 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1114): Opened a69cd0742c8d38c1cc58de125fa4e238; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75345464, jitterRate=0.1227349042892456}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:28:11,469 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:11,469 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegion(1006): Region open journal for cf05b1d7d16bcb282753fe29aabe0370: Running coprocessor pre-open hook at 1731659291460Writing region info on filesystem at 1731659291460Initializing all the Stores at 1731659291460Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659291460Cleaning up temporary data from old regions at 1731659291465 (+5 ms)Running coprocessor post-open hooks at 1731659291469 (+4 ms)Region opened successfully at 1731659291469 2024-11-15T08:28:11,469 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1006): Region open journal for a69cd0742c8d38c1cc58de125fa4e238: Running coprocessor pre-open 
hook at 1731659291460Writing region info on filesystem at 1731659291460Initializing all the Stores at 1731659291461 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659291461Cleaning up temporary data from old regions at 1731659291465 (+4 ms)Running coprocessor post-open hooks at 1731659291469 (+4 ms)Region opened successfully at 1731659291469 2024-11-15T08:28:11,470 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238., pid=98, masterSystemTime=1731659291457 2024-11-15T08:28:11,470 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370., pid=97, masterSystemTime=1731659291455 2024-11-15T08:28:11,472 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. 2024-11-15T08:28:11,472 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=97}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. 2024-11-15T08:28:11,472 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=95 updating hbase:meta row=cf05b1d7d16bcb282753fe29aabe0370, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:28:11,473 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. 2024-11-15T08:28:11,473 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. 
2024-11-15T08:28:11,473 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=96 updating hbase:meta row=a69cd0742c8d38c1cc58de125fa4e238, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:28:11,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=95, state=RUNNABLE, hasLock=false; OpenRegionProcedure cf05b1d7d16bcb282753fe29aabe0370, server=48e8ad8dc936,37511,1731659175104 because future has completed 2024-11-15T08:28:11,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE, hasLock=false; OpenRegionProcedure a69cd0742c8d38c1cc58de125fa4e238, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:28:11,477 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=95 2024-11-15T08:28:11,477 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=95, state=SUCCESS, hasLock=false; OpenRegionProcedure cf05b1d7d16bcb282753fe29aabe0370, server=48e8ad8dc936,37511,1731659175104 in 173 msec 2024-11-15T08:28:11,478 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=98, resume processing ppid=96 2024-11-15T08:28:11,478 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; OpenRegionProcedure a69cd0742c8d38c1cc58de125fa4e238, server=48e8ad8dc936,40637,1731659175269 in 173 msec 2024-11-15T08:28:11,479 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, ppid=94, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cf05b1d7d16bcb282753fe29aabe0370, ASSIGN in 329 msec 2024-11-15T08:28:11,480 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=96, resume processing ppid=94 2024-11-15T08:28:11,480 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, ppid=94, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a69cd0742c8d38c1cc58de125fa4e238, ASSIGN in 330 msec 2024-11-15T08:28:11,481 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T08:28:11,481 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659291481"}]},"ts":"1731659291481"} 2024-11-15T08:28:11,483 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-11-15T08:28:11,483 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T08:28:11,484 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-11-15T08:28:11,486 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] 
access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-15T08:28:11,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:11,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:11,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:11,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:11,491 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:11,491 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:11,491 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:11,492 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:11,493 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 372 msec 2024-11-15T08:28:11,550 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-11-15T08:28:11,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=94 2024-11-15T08:28:11,746 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-15T08:28:11,747 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. 
Timeout = 60000ms 2024-11-15T08:28:11,747 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:28:11,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 2024-11-15T08:28:11,750 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:28:11,750 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 2024-11-15T08:28:11,750 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-15T08:28:11,753 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-15T08:28:11,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659291753 (current time:1731659291753). 2024-11-15T08:28:11,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:28:11,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-15T08:28:11,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:28:11,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@214d7e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:11,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:11,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:11,755 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:11,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:11,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:11,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d200f09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:11,755 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:11,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:11,756 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:11,757 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45230, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:11,757 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43cf0a40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:11,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:11,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:11,759 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:11,760 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39904, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:28:11,761 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 
2024-11-15T08:28:11,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:11,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:11,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:11,761 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:28:11,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50466eb3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:11,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:11,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:11,763 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:11,763 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:11,763 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:11,763 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a429f6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:11,763 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:11,763 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:11,763 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:11,764 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45258, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:11,764 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22a53d4b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:11,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:11,766 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:11,766 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:11,767 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39906, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:28:11,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:11,770 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 
2024-11-15T08:28:11,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor239.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:11,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:11,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:11,770 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:28:11,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-15T08:28:11,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-15T08:28:11,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=99, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-15T08:28:11,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 99 2024-11-15T08:28:11,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-15T08:28:11,773 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:28:11,774 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:28:11,776 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:28:11,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742085_1261 (size=215) 2024-11-15T08:28:11,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742085_1261 (size=215) 2024-11-15T08:28:11,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742085_1261 (size=215) 2024-11-15T08:28:11,787 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:28:11,787 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cf05b1d7d16bcb282753fe29aabe0370}, {pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a69cd0742c8d38c1cc58de125fa4e238}] 2024-11-15T08:28:11,788 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:11,788 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:11,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-15T08:28:11,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=100 2024-11-15T08:28:11,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40637 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=101 2024-11-15T08:28:11,940 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. 2024-11-15T08:28:11,940 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. 2024-11-15T08:28:11,941 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.HRegion(2603): Flush status journal for cf05b1d7d16bcb282753fe29aabe0370: 2024-11-15T08:28:11,941 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.HRegion(2603): Flush status journal for a69cd0742c8d38c1cc58de125fa4e238: 2024-11-15T08:28:11,941 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-15T08:28:11,941 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-15T08:28:11,941 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:11,941 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:11,941 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:11,941 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:11,941 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T08:28:11,941 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T08:28:11,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742086_1262 (size=86) 2024-11-15T08:28:11,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742086_1262 (size=86) 2024-11-15T08:28:11,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742086_1262 (size=86) 2024-11-15T08:28:11,952 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. 2024-11-15T08:28:11,952 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=100}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=100 2024-11-15T08:28:11,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=100 2024-11-15T08:28:11,953 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:11,953 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=100, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:11,955 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=99, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cf05b1d7d16bcb282753fe29aabe0370 in 167 msec 2024-11-15T08:28:11,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742087_1263 (size=86) 2024-11-15T08:28:11,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742087_1263 (size=86) 2024-11-15T08:28:11,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742087_1263 (size=86) 2024-11-15T08:28:11,959 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. 
2024-11-15T08:28:11,959 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-11-15T08:28:11,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=101 2024-11-15T08:28:11,960 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:11,960 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=101, ppid=99, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:11,962 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=101, resume processing ppid=99 2024-11-15T08:28:11,962 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, ppid=99, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a69cd0742c8d38c1cc58de125fa4e238 in 174 msec 2024-11-15T08:28:11,962 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:28:11,963 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:28:11,964 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:28:11,964 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:11,964 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:11,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742088_1264 (size=597) 2024-11-15T08:28:11,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742088_1264 (size=597) 2024-11-15T08:28:11,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742088_1264 (size=597) 2024-11-15T08:28:11,978 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:28:11,982 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:28:11,983 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:11,984 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=99, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:28:11,984 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 99 2024-11-15T08:28:11,986 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=99, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 213 msec 2024-11-15T08:28:12,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=99 2024-11-15T08:28:12,086 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-15T08:28:12,090 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='023f605b80da3922c20a1fd1f9fdcff02', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:28:12,091 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='10b52c2477a3721415717cf57e18dc05c', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:12,092 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='283102104ae612faee0b0a8bfd67f3ba4', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238., 
hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:12,093 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='3a5a459e7e8ab117a611a4806f9de3bab', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:12,094 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='4d5493634b3598a2b8b8bcf7448b04860', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:12,096 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37511 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:28:12,097 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40637 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:28:12,098 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-15T08:28:12,100 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:12,100 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. 
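The two "writing data to region ... with WAL disabled" warnings above are produced when the test client issues puts whose durability skips the write-ahead log. A minimal sketch of such a write against the public HBase client API (row key, value, and configuration here are illustrative, not taken from this test):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(
             TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))) {
      Put put = new Put(Bytes.toBytes("row-0001"));  // illustrative row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skipping the WAL is what triggers the "Data may be lost in the event of a crash"
      // warning logged by HRegion above; it trades crash durability for write throughput.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}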
2024-11-15T08:28:12,101 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:28:12,102 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-15T08:28:12,107 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-15T08:28:12,112 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-15T08:28:12,114 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-15T08:28:12,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659292114 (current time:1731659292114). 2024-11-15T08:28:12,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:28:12,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-15T08:28:12,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:28:12,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47409ad5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:12,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:12,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:12,115 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:12,115 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:12,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:12,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ad9061, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:12,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:12,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:12,116 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:12,117 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45272, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:12,117 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bdf3b58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:12,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:12,118 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:12,119 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:12,119 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39914, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:28:12,120 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 
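The MasterRpcServices(1763) entry above shows the master receiving the client's request for the FLUSH-type snapshot snaptb0-testExportFileSystemStateWithMergeRegion and filling in defaults (creation time, TTL, version, owner). A minimal client-side sketch that would issue such a request, assuming the standard Admin API:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure finishes; while waiting, the client
      // polls the master, which is what the repeated "Checking to see if procedure is done"
      // entries in this log correspond to.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
    }
  }
}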
2024-11-15T08:28:12,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:12,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:12,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:12,120 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:28:12,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59dfa6fa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:12,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:12,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:12,122 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:12,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:12,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:12,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44393675, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:12,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:12,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:12,123 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:12,123 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45288, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:12,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b11f585, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:12,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:12,125 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:12,125 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:12,126 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39924, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:28:12,127 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:12,129 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 
2024-11-15T08:28:12,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor239.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:12,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:12,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:12,129 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:28:12,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-15T08:28:12,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
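The PermissionStorage read above ("entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA]") happens while the master copies the table's ACL into the snapshot description before taking the snapshot. For context, a sketch of how such a table-wide grant would be created in the first place, assuming the AccessController coprocessor is enabled; the user and actions mirror the RWXCA entry above:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTableAclSketch {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Grant READ/WRITE/EXEC/CREATE/ADMIN (= RWXCA) on the whole table to user "jenkins";
      // family and qualifier are null, so the grant applies table-wide.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}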
2024-11-15T08:28:12,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=102, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-15T08:28:12,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 102 2024-11-15T08:28:12,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-15T08:28:12,131 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:28:12,132 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:28:12,134 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:28:12,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742089_1265 (size=210) 2024-11-15T08:28:12,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742089_1265 (size=210) 2024-11-15T08:28:12,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742089_1265 (size=210) 2024-11-15T08:28:12,141 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:28:12,142 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cf05b1d7d16bcb282753fe29aabe0370}, {pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a69cd0742c8d38c1cc58de125fa4e238}] 2024-11-15T08:28:12,142 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:12,142 INFO [PEWorker-5 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:12,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-15T08:28:12,294 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40637 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=104 2024-11-15T08:28:12,294 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. 2024-11-15T08:28:12,295 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(2902): Flushing a69cd0742c8d38c1cc58de125fa4e238 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-15T08:28:12,296 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-11-15T08:28:12,296 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. 2024-11-15T08:28:12,297 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2902): Flushing cf05b1d7d16bcb282753fe29aabe0370 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-15T08:28:12,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370/.tmp/cf/af6d47f8f19541e4ab91217fa00116f5 is 71, key is 037d02342c80e69cd2144a2024f8d50e/cf:q/1731659292095/Put/seqid=0 2024-11-15T08:28:12,318 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238/.tmp/cf/fbccc0953e904dccbb9994749bd2a532 is 71, key is 191579b845c138adbd36a2f33387d40d/cf:q/1731659292097/Put/seqid=0 2024-11-15T08:28:12,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742091_1267 (size=8258) 2024-11-15T08:28:12,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742091_1267 (size=8258) 2024-11-15T08:28:12,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742090_1266 (size=5356) 2024-11-15T08:28:12,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742090_1266 (size=5356) 
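Each SnapshotRegionCallable above begins by flushing its region's memstore to a new HFile (the HFileWriterImpl entries); the flushed file is then committed from .tmp into the cf/ store directory in the entries that follow. The same memstore-to-HFile flush can also be requested explicitly; a sketch using the Admin API:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ManualFlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks every region of the table to write its memstore out as an HFile, the same
      // flush path exercised here on behalf of the snapshot.
      admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
    }
  }
}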
2024-11-15T08:28:12,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742091_1267 (size=8258) 2024-11-15T08:28:12,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742090_1266 (size=5356) 2024-11-15T08:28:12,329 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238/.tmp/cf/fbccc0953e904dccbb9994749bd2a532 2024-11-15T08:28:12,329 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370/.tmp/cf/af6d47f8f19541e4ab91217fa00116f5 2024-11-15T08:28:12,336 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238/.tmp/cf/fbccc0953e904dccbb9994749bd2a532 as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238/cf/fbccc0953e904dccbb9994749bd2a532 2024-11-15T08:28:12,336 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370/.tmp/cf/af6d47f8f19541e4ab91217fa00116f5 as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370/cf/af6d47f8f19541e4ab91217fa00116f5 2024-11-15T08:28:12,343 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238/cf/fbccc0953e904dccbb9994749bd2a532, entries=46, sequenceid=6, filesize=8.1 K 2024-11-15T08:28:12,346 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for a69cd0742c8d38c1cc58de125fa4e238 in 51ms, sequenceid=6, compaction requested=false 2024-11-15T08:28:12,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.HRegion(2603): Flush status journal for a69cd0742c8d38c1cc58de125fa4e238: 2024-11-15T08:28:12,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-15T08:28:12,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:12,347 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:12,347 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238/cf/fbccc0953e904dccbb9994749bd2a532] hfiles 2024-11-15T08:28:12,347 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238/cf/fbccc0953e904dccbb9994749bd2a532 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:12,347 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370/cf/af6d47f8f19541e4ab91217fa00116f5, entries=4, sequenceid=6, filesize=5.2 K 2024-11-15T08:28:12,348 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for cf05b1d7d16bcb282753fe29aabe0370 in 52ms, sequenceid=6, compaction requested=false 2024-11-15T08:28:12,348 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for cf05b1d7d16bcb282753fe29aabe0370: 2024-11-15T08:28:12,348 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-15T08:28:12,349 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:12,349 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:12,349 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370/cf/af6d47f8f19541e4ab91217fa00116f5] hfiles 2024-11-15T08:28:12,349 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370/cf/af6d47f8f19541e4ab91217fa00116f5 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:12,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742092_1268 (size=125) 2024-11-15T08:28:12,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742092_1268 (size=125) 2024-11-15T08:28:12,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742092_1268 (size=125) 2024-11-15T08:28:12,366 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. 
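The SnapshotManifest entries above record, for each region, its region-info plus a reference to each flushed HFile; once the consolidated manifest is written and moved out of .hbase-snapshot/.tmp (the SNAPSHOT_CONSOLIDATE_SNAPSHOT and SNAPSHOT_COMPLETE_SNAPSHOT steps further down), the snapshot becomes visible to clients. A sketch of confirming that from the Admin API:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Lists completed snapshots only; in this log that would include both
      // emptySnaptb0-* and snaptb0-* once procedure pid=102 finishes.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName() + " -> " + sd.getTableName());
      }
    }
  }
}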
2024-11-15T08:28:12,366 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=104}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=104 2024-11-15T08:28:12,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=104 2024-11-15T08:28:12,367 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:12,367 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=104, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:12,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742093_1269 (size=125) 2024-11-15T08:28:12,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742093_1269 (size=125) 2024-11-15T08:28:12,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742093_1269 (size=125) 2024-11-15T08:28:12,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. 2024-11-15T08:28:12,371 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, ppid=102, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a69cd0742c8d38c1cc58de125fa4e238 in 228 msec 2024-11-15T08:28:12,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-15T08:28:12,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-11-15T08:28:12,371 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:12,372 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=102, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:12,374 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=103, resume processing ppid=102 2024-11-15T08:28:12,375 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=102, state=SUCCESS, hasLock=false; SnapshotRegionProcedure cf05b1d7d16bcb282753fe29aabe0370 in 231 msec 2024-11-15T08:28:12,375 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:28:12,375 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:28:12,376 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:28:12,376 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:12,377 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:12,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742094_1270 (size=675) 2024-11-15T08:28:12,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742094_1270 (size=675) 2024-11-15T08:28:12,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742094_1270 (size=675) 2024-11-15T08:28:12,393 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:28:12,398 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:28:12,399 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:12,400 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=102, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:28:12,400 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 102 2024-11-15T08:28:12,402 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, 
state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=102, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 270 msec 2024-11-15T08:28:12,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=102 2024-11-15T08:28:12,446 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-15T08:28:12,469 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T08:28:12,470 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T08:28:12,470 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-15T08:28:12,471 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54968, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T08:28:12,472 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39926, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T08:28:12,472 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59288, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-15T08:28:12,473 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40637 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-15T08:28:12,473 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37511 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-15T08:28:12,473 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34555 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-15T08:28:12,475 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T08:28:12,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=105, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:12,477 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T08:28:12,477 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:12,477 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 105 2024-11-15T08:28:12,478 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T08:28:12,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-15T08:28:12,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742095_1271 (size=399) 2024-11-15T08:28:12,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742095_1271 (size=399) 2024-11-15T08:28:12,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742095_1271 (size=399) 2024-11-15T08:28:12,486 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c4d277bef84f42d17e48f1bbacdd433a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:12,487 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 8ddb12e457b72ccff7a497a03db1cc4e, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:12,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742097_1273 (size=85) 2024-11-15T08:28:12,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742097_1273 (size=85) 2024-11-15T08:28:12,498 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742097_1273 (size=85) 2024-11-15T08:28:12,498 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:12,498 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing 8ddb12e457b72ccff7a497a03db1cc4e, disabling compactions & flushes 2024-11-15T08:28:12,498 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e. 2024-11-15T08:28:12,498 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e. 2024-11-15T08:28:12,498 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e. after waiting 0 ms 2024-11-15T08:28:12,498 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e. 2024-11-15T08:28:12,498 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e. 
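The HMaster$4(2454) create request above (table testtb-testExportFileSystemStateWithMergeRegion-1, a single 'cf' family, regions split at row key '2') is what the RegionOpenAndInit pool is initializing here. A client-side sketch of an equivalent create call, leaving all descriptor options other than the family at their defaults:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateSplitTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      byte[][] splitKeys = { Bytes.toBytes("2") };  // one split key => regions ''..'2' and '2'..''
      admin.createTable(desc, splitKeys);           // drives the CreateTableProcedure (pid=105)
    }
  }
}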
2024-11-15T08:28:12,498 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for 8ddb12e457b72ccff7a497a03db1cc4e: Waiting for close lock at 1731659292498Disabling compacts and flushes for region at 1731659292498Disabling writes for close at 1731659292498Writing region close event to WAL at 1731659292498Closed at 1731659292498 2024-11-15T08:28:12,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742096_1272 (size=85) 2024-11-15T08:28:12,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742096_1272 (size=85) 2024-11-15T08:28:12,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742096_1272 (size=85) 2024-11-15T08:28:12,502 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:12,503 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing c4d277bef84f42d17e48f1bbacdd433a, disabling compactions & flushes 2024-11-15T08:28:12,503 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a. 2024-11-15T08:28:12,503 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a. 2024-11-15T08:28:12,503 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a. after waiting 0 ms 2024-11-15T08:28:12,503 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a. 2024-11-15T08:28:12,503 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a. 
2024-11-15T08:28:12,503 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for c4d277bef84f42d17e48f1bbacdd433a: Waiting for close lock at 1731659292503Disabling compacts and flushes for region at 1731659292503Disabling writes for close at 1731659292503Writing region close event to WAL at 1731659292503Closed at 1731659292503 2024-11-15T08:28:12,504 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T08:28:12,504 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1731659292504"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659292504"}]},"ts":"1731659292504"} 2024-11-15T08:28:12,504 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1731659292504"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659292504"}]},"ts":"1731659292504"} 2024-11-15T08:28:12,507 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T08:28:12,507 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T08:28:12,508 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659292507"}]},"ts":"1731659292507"} 2024-11-15T08:28:12,509 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-11-15T08:28:12,510 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {48e8ad8dc936=0} racks are {/default-rack=0} 2024-11-15T08:28:12,511 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T08:28:12,511 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T08:28:12,511 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T08:28:12,511 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T08:28:12,511 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T08:28:12,511 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T08:28:12,511 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T08:28:12,511 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T08:28:12,511 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T08:28:12,511 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T08:28:12,511 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c4d277bef84f42d17e48f1bbacdd433a, ASSIGN}, {pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8ddb12e457b72ccff7a497a03db1cc4e, ASSIGN}] 2024-11-15T08:28:12,512 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c4d277bef84f42d17e48f1bbacdd433a, ASSIGN 2024-11-15T08:28:12,512 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8ddb12e457b72ccff7a497a03db1cc4e, ASSIGN 2024-11-15T08:28:12,513 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8ddb12e457b72ccff7a497a03db1cc4e, ASSIGN; state=OFFLINE, location=48e8ad8dc936,34555,1731659175344; forceNewPlan=false, retain=false 2024-11-15T08:28:12,513 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c4d277bef84f42d17e48f1bbacdd433a, ASSIGN; state=OFFLINE, location=48e8ad8dc936,40637,1731659175269; forceNewPlan=false, retain=false 2024-11-15T08:28:12,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-15T08:28:12,664 INFO [48e8ad8dc936:35169 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
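The balancer and TransitRegionStateProcedure entries above pick region servers for the two new regions; the OpenRegionProcedure subprocedures that follow dispatch the actual opens. Once the regions reach OPEN, the resulting placement can be read back through a RegionLocator; a sketch:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionPlacementSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(
             TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))) {
      // Prints encoded region name and hosting server,
      // e.g. c4d277bef84f42d17e48f1bbacdd433a -> 48e8ad8dc936,40637,1731659175269
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}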
2024-11-15T08:28:12,664 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=106 updating hbase:meta row=c4d277bef84f42d17e48f1bbacdd433a, regionState=OPENING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:28:12,664 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=107 updating hbase:meta row=8ddb12e457b72ccff7a497a03db1cc4e, regionState=OPENING, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:28:12,667 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=106, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c4d277bef84f42d17e48f1bbacdd433a, ASSIGN because future has completed 2024-11-15T08:28:12,668 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=106, state=RUNNABLE, hasLock=false; OpenRegionProcedure c4d277bef84f42d17e48f1bbacdd433a, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:28:12,668 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=107, ppid=105, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8ddb12e457b72ccff7a497a03db1cc4e, ASSIGN because future has completed 2024-11-15T08:28:12,669 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=107, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8ddb12e457b72ccff7a497a03db1cc4e, server=48e8ad8dc936,34555,1731659175344}] 2024-11-15T08:28:12,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-15T08:28:12,823 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a. 2024-11-15T08:28:12,823 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7752): Opening region: {ENCODED => c4d277bef84f42d17e48f1bbacdd433a, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a.', STARTKEY => '', ENDKEY => '2'} 2024-11-15T08:28:12,823 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a. service=AccessControlService 2024-11-15T08:28:12,823 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
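Here pid=106/107 mark both regions OPENING in hbase:meta and spawn OpenRegionProcedure pid=108/109 on the servers at ports 40637 and 34555. Once those complete, a client observes the same placements that the AsyncNonMetaRegionLocator entries further down report; a minimal sketch of reading them through a RegionLocator, assuming the conn and tn handles from the sketch above.

import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.client.RegionLocator;

try (RegionLocator locator = conn.getRegionLocator(tn)) {
  // getAllRegionLocations() reads hbase:meta, i.e. the rows RegionStateStore is updating here.
  List<HRegionLocation> locations = locator.getAllRegionLocations();
  for (HRegionLocation loc : locations) {
    System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
  }
}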
2024-11-15T08:28:12,824 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 c4d277bef84f42d17e48f1bbacdd433a 2024-11-15T08:28:12,824 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:12,824 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7794): checking encryption for c4d277bef84f42d17e48f1bbacdd433a 2024-11-15T08:28:12,824 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(7797): checking classloading for c4d277bef84f42d17e48f1bbacdd433a 2024-11-15T08:28:12,824 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e. 2024-11-15T08:28:12,824 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7752): Opening region: {ENCODED => 8ddb12e457b72ccff7a497a03db1cc4e, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e.', STARTKEY => '2', ENDKEY => ''} 2024-11-15T08:28:12,824 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e. service=AccessControlService 2024-11-15T08:28:12,825 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
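Each region open registers the AccessControlService coprocessor, so this cluster is running with HBase authorization switched on. A minimal sketch of the configuration that normally produces these "System coprocessor ... AccessController loaded" entries; these are the standard security properties, not values read from this test's site configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

Configuration secureConf = HBaseConfiguration.create();
secureConf.setBoolean("hbase.security.authorization", true);
// Load the AccessController on the master, the region servers and every region,
// which is what makes each region open log "service=AccessControlService".
String ac = "org.apache.hadoop.hbase.security.access.AccessController";
secureConf.set("hbase.coprocessor.master.classes", ac);
secureConf.set("hbase.coprocessor.region.classes", ac);
secureConf.set("hbase.coprocessor.regionserver.classes", ac);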
2024-11-15T08:28:12,825 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 8ddb12e457b72ccff7a497a03db1cc4e 2024-11-15T08:28:12,825 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:12,825 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7794): checking encryption for 8ddb12e457b72ccff7a497a03db1cc4e 2024-11-15T08:28:12,825 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(7797): checking classloading for 8ddb12e457b72ccff7a497a03db1cc4e 2024-11-15T08:28:12,825 INFO [StoreOpener-c4d277bef84f42d17e48f1bbacdd433a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c4d277bef84f42d17e48f1bbacdd433a 2024-11-15T08:28:12,826 INFO [StoreOpener-8ddb12e457b72ccff7a497a03db1cc4e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8ddb12e457b72ccff7a497a03db1cc4e 2024-11-15T08:28:12,827 INFO [StoreOpener-c4d277bef84f42d17e48f1bbacdd433a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c4d277bef84f42d17e48f1bbacdd433a columnFamilyName cf 2024-11-15T08:28:12,827 DEBUG [StoreOpener-c4d277bef84f42d17e48f1bbacdd433a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:12,827 INFO [StoreOpener-c4d277bef84f42d17e48f1bbacdd433a-1 {}] regionserver.HStore(327): Store=c4d277bef84f42d17e48f1bbacdd433a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:28:12,827 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1038): replaying wal for c4d277bef84f42d17e48f1bbacdd433a 2024-11-15T08:28:12,827 INFO [StoreOpener-8ddb12e457b72ccff7a497a03db1cc4e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8ddb12e457b72ccff7a497a03db1cc4e columnFamilyName cf 2024-11-15T08:28:12,827 DEBUG [StoreOpener-8ddb12e457b72ccff7a497a03db1cc4e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:12,828 INFO [StoreOpener-8ddb12e457b72ccff7a497a03db1cc4e-1 {}] regionserver.HStore(327): Store=8ddb12e457b72ccff7a497a03db1cc4e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:28:12,828 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a 2024-11-15T08:28:12,828 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1038): replaying wal for 8ddb12e457b72ccff7a497a03db1cc4e 2024-11-15T08:28:12,828 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a 2024-11-15T08:28:12,829 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e 2024-11-15T08:28:12,829 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1048): stopping wal replay for c4d277bef84f42d17e48f1bbacdd433a 2024-11-15T08:28:12,829 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1060): Cleaning up temporary data for c4d277bef84f42d17e48f1bbacdd433a 2024-11-15T08:28:12,829 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e 2024-11-15T08:28:12,829 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1048): stopping wal replay for 8ddb12e457b72ccff7a497a03db1cc4e 2024-11-15T08:28:12,829 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1060): Cleaning up temporary data for 
8ddb12e457b72ccff7a497a03db1cc4e 2024-11-15T08:28:12,830 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1093): writing seq id for c4d277bef84f42d17e48f1bbacdd433a 2024-11-15T08:28:12,831 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1093): writing seq id for 8ddb12e457b72ccff7a497a03db1cc4e 2024-11-15T08:28:12,832 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:28:12,833 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1114): Opened c4d277bef84f42d17e48f1bbacdd433a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69122130, jitterRate=0.030000001192092896}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:28:12,833 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c4d277bef84f42d17e48f1bbacdd433a 2024-11-15T08:28:12,833 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:28:12,833 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegion(1006): Region open journal for c4d277bef84f42d17e48f1bbacdd433a: Running coprocessor pre-open hook at 1731659292824Writing region info on filesystem at 1731659292824Initializing all the Stores at 1731659292825 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659292825Cleaning up temporary data from old regions at 1731659292829 (+4 ms)Running coprocessor post-open hooks at 1731659292833 (+4 ms)Region opened successfully at 1731659292833 2024-11-15T08:28:12,833 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1114): Opened 8ddb12e457b72ccff7a497a03db1cc4e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69906148, jitterRate=0.04168277978897095}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:28:12,833 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8ddb12e457b72ccff7a497a03db1cc4e 2024-11-15T08:28:12,833 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegion(1006): Region open journal for 
8ddb12e457b72ccff7a497a03db1cc4e: Running coprocessor pre-open hook at 1731659292825Writing region info on filesystem at 1731659292825Initializing all the Stores at 1731659292826 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659292826Cleaning up temporary data from old regions at 1731659292829 (+3 ms)Running coprocessor post-open hooks at 1731659292833 (+4 ms)Region opened successfully at 1731659292833 2024-11-15T08:28:12,834 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a., pid=108, masterSystemTime=1731659292820 2024-11-15T08:28:12,834 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e., pid=109, masterSystemTime=1731659292821 2024-11-15T08:28:12,835 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a. 2024-11-15T08:28:12,836 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=108}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a. 2024-11-15T08:28:12,836 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=106 updating hbase:meta row=c4d277bef84f42d17e48f1bbacdd433a, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:28:12,836 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e. 2024-11-15T08:28:12,836 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=109}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e. 
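Both regions are now open with openSeqNum=2, and the flush entries further down show that the test then writes exactly one cell per region: row keys '1' and '2', family 'cf', empty qualifier. A minimal sketch of those writes, assuming the conn and tn handles from the first sketch; the cell values are placeholders, since the log only records sizes.

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

try (Table table = conn.getTable(tn)) {
  byte[] cf = Bytes.toBytes("cf");
  byte[] emptyQualifier = Bytes.toBytes("");
  // Row '1' lands in region c4d277... (''..'2'), row '2' in region 8ddb12... ('2'..'').
  table.put(new Put(Bytes.toBytes("1")).addColumn(cf, emptyQualifier, Bytes.toBytes("v1")));
  table.put(new Put(Bytes.toBytes("2")).addColumn(cf, emptyQualifier, Bytes.toBytes("v2")));
}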
2024-11-15T08:28:12,837 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=107 updating hbase:meta row=8ddb12e457b72ccff7a497a03db1cc4e, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:28:12,838 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=108, ppid=106, state=RUNNABLE, hasLock=false; OpenRegionProcedure c4d277bef84f42d17e48f1bbacdd433a, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:28:12,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=107, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8ddb12e457b72ccff7a497a03db1cc4e, server=48e8ad8dc936,34555,1731659175344 because future has completed 2024-11-15T08:28:12,841 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=106 2024-11-15T08:28:12,841 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=106, state=SUCCESS, hasLock=false; OpenRegionProcedure c4d277bef84f42d17e48f1bbacdd433a, server=48e8ad8dc936,40637,1731659175269 in 171 msec 2024-11-15T08:28:12,841 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=109, resume processing ppid=107 2024-11-15T08:28:12,842 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=107, state=SUCCESS, hasLock=false; OpenRegionProcedure 8ddb12e457b72ccff7a497a03db1cc4e, server=48e8ad8dc936,34555,1731659175344 in 171 msec 2024-11-15T08:28:12,842 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=105, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c4d277bef84f42d17e48f1bbacdd433a, ASSIGN in 330 msec 2024-11-15T08:28:12,843 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=107, resume processing ppid=105 2024-11-15T08:28:12,843 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, ppid=105, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8ddb12e457b72ccff7a497a03db1cc4e, ASSIGN in 331 msec 2024-11-15T08:28:12,844 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T08:28:12,844 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659292844"}]},"ts":"1731659292844"} 2024-11-15T08:28:12,845 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-11-15T08:28:12,846 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=105, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T08:28:12,846 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-11-15T08:28:12,849 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-15T08:28:12,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:12,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:12,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:12,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:12,854 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:12,854 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:12,854 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:12,854 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:12,855 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:12,855 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:12,855 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:12,855 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:12,855 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 378 msec 2024-11-15T08:28:13,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=105 2024-11-15T08:28:13,107 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-15T08:28:13,110 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:13,113 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e., hostname=48e8ad8dc936,34555,1731659175344, seqNum=2] 2024-11-15T08:28:13,115 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-11-15T08:28:13,127 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [c4d277bef84f42d17e48f1bbacdd433a, 8ddb12e457b72ccff7a497a03db1cc4e] 2024-11-15T08:28:13,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[c4d277bef84f42d17e48f1bbacdd433a, 8ddb12e457b72ccff7a497a03db1cc4e], force=true 2024-11-15T08:28:13,133 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[c4d277bef84f42d17e48f1bbacdd433a, 8ddb12e457b72ccff7a497a03db1cc4e], force=true 2024-11-15T08:28:13,133 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[c4d277bef84f42d17e48f1bbacdd433a, 8ddb12e457b72ccff7a497a03db1cc4e], force=true 2024-11-15T08:28:13,133 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[c4d277bef84f42d17e48f1bbacdd433a, 8ddb12e457b72ccff7a497a03db1cc4e], 
force=true 2024-11-15T08:28:13,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-15T08:28:13,145 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c4d277bef84f42d17e48f1bbacdd433a, UNASSIGN}, {pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8ddb12e457b72ccff7a497a03db1cc4e, UNASSIGN}] 2024-11-15T08:28:13,145 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8ddb12e457b72ccff7a497a03db1cc4e, UNASSIGN 2024-11-15T08:28:13,146 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c4d277bef84f42d17e48f1bbacdd433a, UNASSIGN 2024-11-15T08:28:13,146 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=112 updating hbase:meta row=8ddb12e457b72ccff7a497a03db1cc4e, regionState=CLOSING, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:28:13,146 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=111 updating hbase:meta row=c4d277bef84f42d17e48f1bbacdd433a, regionState=CLOSING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:28:13,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8ddb12e457b72ccff7a497a03db1cc4e, UNASSIGN because future has completed 2024-11-15T08:28:13,148 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-15T08:28:13,148 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8ddb12e457b72ccff7a497a03db1cc4e, server=48e8ad8dc936,34555,1731659175344}] 2024-11-15T08:28:13,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c4d277bef84f42d17e48f1bbacdd433a, UNASSIGN because future has completed 2024-11-15T08:28:13,149 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-15T08:28:13,149 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=114, ppid=111, state=RUNNABLE, hasLock=false; CloseRegionProcedure c4d277bef84f42d17e48f1bbacdd433a, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:28:13,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done 
pid=110 2024-11-15T08:28:13,301 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(122): Close 8ddb12e457b72ccff7a497a03db1cc4e 2024-11-15T08:28:13,301 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(122): Close c4d277bef84f42d17e48f1bbacdd433a 2024-11-15T08:28:13,301 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-15T08:28:13,301 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-15T08:28:13,301 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1722): Closing 8ddb12e457b72ccff7a497a03db1cc4e, disabling compactions & flushes 2024-11-15T08:28:13,301 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e. 2024-11-15T08:28:13,301 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e. 2024-11-15T08:28:13,301 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1722): Closing c4d277bef84f42d17e48f1bbacdd433a, disabling compactions & flushes 2024-11-15T08:28:13,301 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e. after waiting 0 ms 2024-11-15T08:28:13,301 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e. 2024-11-15T08:28:13,301 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a. 2024-11-15T08:28:13,301 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a. 2024-11-15T08:28:13,301 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(2902): Flushing 8ddb12e457b72ccff7a497a03db1cc4e 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-15T08:28:13,301 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a. 
after waiting 0 ms 2024-11-15T08:28:13,301 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a. 2024-11-15T08:28:13,301 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(2902): Flushing c4d277bef84f42d17e48f1bbacdd433a 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-15T08:28:13,318 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a/.tmp/cf/9a30f2663d0e42828ed8a518b9cb06cc is 28, key is 1/cf:/1731659293111/Put/seqid=0 2024-11-15T08:28:13,318 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e/.tmp/cf/e18fdb147d174f0bb78bca8fdc751d1c is 28, key is 2/cf:/1731659293114/Put/seqid=0 2024-11-15T08:28:13,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742099_1275 (size=4945) 2024-11-15T08:28:13,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742099_1275 (size=4945) 2024-11-15T08:28:13,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742098_1274 (size=4945) 2024-11-15T08:28:13,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742098_1274 (size=4945) 2024-11-15T08:28:13,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742098_1274 (size=4945) 2024-11-15T08:28:13,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742099_1275 (size=4945) 2024-11-15T08:28:13,325 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a/.tmp/cf/9a30f2663d0e42828ed8a518b9cb06cc 2024-11-15T08:28:13,325 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e/.tmp/cf/e18fdb147d174f0bb78bca8fdc751d1c 2024-11-15T08:28:13,331 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e/.tmp/cf/e18fdb147d174f0bb78bca8fdc751d1c as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e/cf/e18fdb147d174f0bb78bca8fdc751d1c 2024-11-15T08:28:13,331 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a/.tmp/cf/9a30f2663d0e42828ed8a518b9cb06cc as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a/cf/9a30f2663d0e42828ed8a518b9cb06cc 2024-11-15T08:28:13,336 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e/cf/e18fdb147d174f0bb78bca8fdc751d1c, entries=1, sequenceid=5, filesize=4.8 K 2024-11-15T08:28:13,336 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a/cf/9a30f2663d0e42828ed8a518b9cb06cc, entries=1, sequenceid=5, filesize=4.8 K 2024-11-15T08:28:13,337 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 8ddb12e457b72ccff7a497a03db1cc4e in 36ms, sequenceid=5, compaction requested=false 2024-11-15T08:28:13,337 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-11-15T08:28:13,337 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for c4d277bef84f42d17e48f1bbacdd433a in 36ms, sequenceid=5, compaction requested=false 2024-11-15T08:28:13,337 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-11-15T08:28:13,341 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-15T08:28:13,341 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-15T08:28:13,341 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:28:13,342 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e. 2024-11-15T08:28:13,342 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:28:13,342 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] regionserver.HRegion(1676): Region close journal for 8ddb12e457b72ccff7a497a03db1cc4e: Waiting for close lock at 1731659293301Running coprocessor pre-close hooks at 1731659293301Disabling compacts and flushes for region at 1731659293301Disabling writes for close at 1731659293301Obtaining lock to block concurrent updates at 1731659293301Preparing flush snapshotting stores in 8ddb12e457b72ccff7a497a03db1cc4e at 1731659293301Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1731659293301Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e. at 1731659293302 (+1 ms)Flushing 8ddb12e457b72ccff7a497a03db1cc4e/cf: creating writer at 1731659293302Flushing 8ddb12e457b72ccff7a497a03db1cc4e/cf: appending metadata at 1731659293317 (+15 ms)Flushing 8ddb12e457b72ccff7a497a03db1cc4e/cf: closing flushed file at 1731659293317Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7130af8f: reopening flushed file at 1731659293330 (+13 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 8ddb12e457b72ccff7a497a03db1cc4e in 36ms, sequenceid=5, compaction requested=false at 1731659293337 (+7 ms)Writing region close event to WAL at 1731659293338 (+1 ms)Running coprocessor post-close hooks at 1731659293341 (+3 ms)Closed at 1731659293342 (+1 ms) 2024-11-15T08:28:13,342 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a. 
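The flush-and-close of both regions above is driven by MergeTableRegionsProcedure pid=110, which the master started after receiving "merge regions [c4d277bef84f42d17e48f1bbacdd433a, 8ddb12e457b72ccff7a497a03db1cc4e]" with force=true. A minimal sketch of issuing that merge from the client, assuming the admin and tn handles from the first sketch and an HBase version whose Admin exposes the multi-region mergeRegionsAsync(byte[][], boolean) variant.

import java.util.List;
import org.apache.hadoop.hbase.client.RegionInfo;

List<RegionInfo> regions = admin.getRegions(tn);      // the two regions created earlier
byte[][] toMerge = {
    regions.get(0).getEncodedNameAsBytes(),
    regions.get(1).getEncodedNameAsBytes()
};
// forcible=true corresponds to the force=true recorded on MergeTableRegionsProcedure pid=110;
// waiting on the Future mirrors the client's "Checking to see if procedure is done pid=110" polling.
admin.mergeRegionsAsync(toMerge, true).get();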
2024-11-15T08:28:13,342 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] regionserver.HRegion(1676): Region close journal for c4d277bef84f42d17e48f1bbacdd433a: Waiting for close lock at 1731659293301Running coprocessor pre-close hooks at 1731659293301Disabling compacts and flushes for region at 1731659293301Disabling writes for close at 1731659293301Obtaining lock to block concurrent updates at 1731659293301Preparing flush snapshotting stores in c4d277bef84f42d17e48f1bbacdd433a at 1731659293301Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1731659293301Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a. at 1731659293302 (+1 ms)Flushing c4d277bef84f42d17e48f1bbacdd433a/cf: creating writer at 1731659293302Flushing c4d277bef84f42d17e48f1bbacdd433a/cf: appending metadata at 1731659293317 (+15 ms)Flushing c4d277bef84f42d17e48f1bbacdd433a/cf: closing flushed file at 1731659293317Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@187de516: reopening flushed file at 1731659293330 (+13 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for c4d277bef84f42d17e48f1bbacdd433a in 36ms, sequenceid=5, compaction requested=false at 1731659293337 (+7 ms)Writing region close event to WAL at 1731659293338 (+1 ms)Running coprocessor post-close hooks at 1731659293342 (+4 ms)Closed at 1731659293342 2024-11-15T08:28:13,343 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=113}] handler.UnassignRegionHandler(157): Closed 8ddb12e457b72ccff7a497a03db1cc4e 2024-11-15T08:28:13,344 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=112 updating hbase:meta row=8ddb12e457b72ccff7a497a03db1cc4e, regionState=CLOSED 2024-11-15T08:28:13,344 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=114}] handler.UnassignRegionHandler(157): Closed c4d277bef84f42d17e48f1bbacdd433a 2024-11-15T08:28:13,345 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=111 updating hbase:meta row=c4d277bef84f42d17e48f1bbacdd433a, regionState=CLOSED 2024-11-15T08:28:13,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=113, ppid=112, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8ddb12e457b72ccff7a497a03db1cc4e, server=48e8ad8dc936,34555,1731659175344 because future has completed 2024-11-15T08:28:13,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=114, ppid=111, state=RUNNABLE, hasLock=false; CloseRegionProcedure c4d277bef84f42d17e48f1bbacdd433a, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:28:13,348 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=113, resume processing ppid=112 2024-11-15T08:28:13,348 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, ppid=112, state=SUCCESS, hasLock=false; CloseRegionProcedure 8ddb12e457b72ccff7a497a03db1cc4e, server=48e8ad8dc936,34555,1731659175344 in 198 msec 2024-11-15T08:28:13,349 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=114, resume processing ppid=111 2024-11-15T08:28:13,349 
INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, ppid=111, state=SUCCESS, hasLock=false; CloseRegionProcedure c4d277bef84f42d17e48f1bbacdd433a, server=48e8ad8dc936,40637,1731659175269 in 198 msec 2024-11-15T08:28:13,350 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8ddb12e457b72ccff7a497a03db1cc4e, UNASSIGN in 203 msec 2024-11-15T08:28:13,351 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=110 2024-11-15T08:28:13,351 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=c4d277bef84f42d17e48f1bbacdd433a, UNASSIGN in 204 msec 2024-11-15T08:28:13,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742100_1276 (size=84) 2024-11-15T08:28:13,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742100_1276 (size=84) 2024-11-15T08:28:13,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742100_1276 (size=84) 2024-11-15T08:28:13,366 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:13,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742101_1277 (size=20) 2024-11-15T08:28:13,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742101_1277 (size=20) 2024-11-15T08:28:13,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742101_1277 (size=20) 2024-11-15T08:28:13,382 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:13,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742102_1278 (size=21) 2024-11-15T08:28:13,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742102_1278 (size=21) 2024-11-15T08:28:13,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742102_1278 (size=21) 2024-11-15T08:28:13,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742103_1279 (size=84) 2024-11-15T08:28:13,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742103_1279 (size=84) 2024-11-15T08:28:13,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742103_1279 (size=84) 2024-11-15T08:28:13,402 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:13,414 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-11-15T08:28:13,416 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292475.c4d277bef84f42d17e48f1bbacdd433a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-15T08:28:13,416 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1731659292475.8ddb12e457b72ccff7a497a03db1cc4e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-15T08:28:13,416 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-15T08:28:13,435 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=586daedb5286d7e2974fc4cbcd969247, ASSIGN}] 2024-11-15T08:28:13,436 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=586daedb5286d7e2974fc4cbcd969247, ASSIGN 2024-11-15T08:28:13,436 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=586daedb5286d7e2974fc4cbcd969247, ASSIGN; state=MERGED, location=48e8ad8dc936,40637,1731659175269; forceNewPlan=false, retain=false 2024-11-15T08:28:13,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-15T08:28:13,477 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T08:28:13,587 INFO [48e8ad8dc936:35169 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
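At this point the merged region 586daedb5286d7e2974fc4cbcd969247 has been written to hbase:meta, with merge0000/merge0001 qualifiers pointing back at the two parents, and is being reassigned. Once pid=110 finishes, the table should again consist of a single region covering the whole key space; a minimal sketch of checking that, reusing the admin and tn handles from the first sketch.

import java.util.List;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

List<RegionInfo> merged = admin.getRegions(tn);
// Expect a single region with empty start and end keys after the merge completes.
for (RegionInfo region : merged) {
  System.out.println(region.getEncodedName()
      + " [" + Bytes.toStringBinary(region.getStartKey())
      + ", " + Bytes.toStringBinary(region.getEndKey()) + ")");
}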
2024-11-15T08:28:13,587 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=586daedb5286d7e2974fc4cbcd969247, regionState=OPENING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:28:13,590 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=586daedb5286d7e2974fc4cbcd969247, ASSIGN because future has completed 2024-11-15T08:28:13,590 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 586daedb5286d7e2974fc4cbcd969247, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:28:13,745 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247. 2024-11-15T08:28:13,745 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7752): Opening region: {ENCODED => 586daedb5286d7e2974fc4cbcd969247, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247.', STARTKEY => '', ENDKEY => ''} 2024-11-15T08:28:13,746 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247. service=AccessControlService 2024-11-15T08:28:13,746 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
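The reopened merged region initially serves its data through reference links to the parents' store files (the "...-top" entries loaded just below); a compaction later rewrites those links into the region's own HFiles, after which the parent directories can be cleaned up. A minimal sketch of requesting that compaction explicitly, reusing the admin and tn handles from the first sketch; whether this particular test triggers it or leaves it to background housekeeping is not visible in this excerpt.

// Major compaction rewrites the '-top' references into real store files of the merged region.
// The call returns immediately; admin.getCompactionState(tn) can be polled if the caller must wait.
admin.majorCompact(tn);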
2024-11-15T08:28:13,746 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 586daedb5286d7e2974fc4cbcd969247 2024-11-15T08:28:13,746 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:13,746 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7794): checking encryption for 586daedb5286d7e2974fc4cbcd969247 2024-11-15T08:28:13,746 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(7797): checking classloading for 586daedb5286d7e2974fc4cbcd969247 2024-11-15T08:28:13,747 INFO [StoreOpener-586daedb5286d7e2974fc4cbcd969247-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 586daedb5286d7e2974fc4cbcd969247 2024-11-15T08:28:13,748 INFO [StoreOpener-586daedb5286d7e2974fc4cbcd969247-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 586daedb5286d7e2974fc4cbcd969247 columnFamilyName cf 2024-11-15T08:28:13,748 DEBUG [StoreOpener-586daedb5286d7e2974fc4cbcd969247-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:13,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-15T08:28:13,766 DEBUG [StoreOpener-586daedb5286d7e2974fc4cbcd969247-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247/cf/9a30f2663d0e42828ed8a518b9cb06cc.c4d277bef84f42d17e48f1bbacdd433a->hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a/cf/9a30f2663d0e42828ed8a518b9cb06cc-top 2024-11-15T08:28:13,771 DEBUG [StoreOpener-586daedb5286d7e2974fc4cbcd969247-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247/cf/e18fdb147d174f0bb78bca8fdc751d1c.8ddb12e457b72ccff7a497a03db1cc4e->hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e/cf/e18fdb147d174f0bb78bca8fdc751d1c-top 2024-11-15T08:28:13,772 INFO [StoreOpener-586daedb5286d7e2974fc4cbcd969247-1 {}] regionserver.HStore(327): Store=586daedb5286d7e2974fc4cbcd969247/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:28:13,772 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1038): replaying wal for 586daedb5286d7e2974fc4cbcd969247 2024-11-15T08:28:13,773 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247 2024-11-15T08:28:13,774 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247 2024-11-15T08:28:13,774 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1048): stopping wal replay for 586daedb5286d7e2974fc4cbcd969247 2024-11-15T08:28:13,774 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1060): Cleaning up temporary data for 586daedb5286d7e2974fc4cbcd969247 2024-11-15T08:28:13,776 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1093): writing seq id for 586daedb5286d7e2974fc4cbcd969247 2024-11-15T08:28:13,776 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1114): Opened 586daedb5286d7e2974fc4cbcd969247; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67860733, jitterRate=0.011203721165657043}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:28:13,777 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 586daedb5286d7e2974fc4cbcd969247 2024-11-15T08:28:13,777 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegion(1006): Region open journal for 586daedb5286d7e2974fc4cbcd969247: Running coprocessor pre-open hook at 1731659293746Writing region info on filesystem at 1731659293746Initializing all the Stores at 1731659293747 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659293747Cleaning up temporary data from old regions at 1731659293774 (+27 ms)Running coprocessor post-open hooks at 1731659293777 (+3 ms)Region opened successfully at 1731659293777 2024-11-15T08:28:13,778 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247., pid=116, masterSystemTime=1731659293742 2024-11-15T08:28:13,778 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247.,because compaction is disabled. 2024-11-15T08:28:13,780 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247. 2024-11-15T08:28:13,780 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=116}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247. 2024-11-15T08:28:13,781 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=586daedb5286d7e2974fc4cbcd969247, regionState=OPEN, openSeqNum=9, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:28:13,782 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 586daedb5286d7e2974fc4cbcd969247, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:28:13,785 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=116, resume processing ppid=115 2024-11-15T08:28:13,785 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 586daedb5286d7e2974fc4cbcd969247, server=48e8ad8dc936,40637,1731659175269 in 193 msec 2024-11-15T08:28:13,786 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=115, resume processing ppid=110 2024-11-15T08:28:13,786 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=110, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=586daedb5286d7e2974fc4cbcd969247, ASSIGN in 350 msec 2024-11-15T08:28:13,788 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[c4d277bef84f42d17e48f1bbacdd433a, 8ddb12e457b72ccff7a497a03db1cc4e], force=true in 658 msec 2024-11-15T08:28:14,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=110 2024-11-15T08:28:14,277 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-15T08:28:14,277 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-15T08:28:14,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659294277 (current time:1731659294277). 2024-11-15T08:28:14,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:28:14,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-11-15T08:28:14,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:28:14,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@718b826d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:14,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:14,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:14,279 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:14,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:14,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:14,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43d06882, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:14,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:14,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:14,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:14,281 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45304, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:14,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bb7d562, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:14,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:14,282 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:14,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:14,284 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39938, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:28:14,285 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 2024-11-15T08:28:14,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:14,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:14,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:14,285 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T08:28:14,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5502c840, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:14,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:14,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:14,287 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:14,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:14,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:14,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@689bd319, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:14,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:14,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:14,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:14,288 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45326, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:14,288 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2eb51127, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:14,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:14,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:14,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:14,290 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39954, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T08:28:14,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:14,293 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 2024-11-15T08:28:14,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor239.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:14,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:14,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:14,293 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
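[editor's note] The preceding entries show the master receiving a snapshot request for ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 with type=FLUSH and then resolving the table ACLs before registering SnapshotProcedure pid=117 below. A minimal sketch of the client-side call that produces such a request, assuming the synchronous Admin API; connection details are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // FLUSH-type snapshot, as in the { ss=... type=FLUSH ttl=0 } request above.
          // The call blocks until the master-side SnapshotProcedure completes.
          admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
              TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
              SnapshotType.FLUSH);
        }
      }
    }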
2024-11-15T08:28:14,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-15T08:28:14,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T08:28:14,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=117, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-15T08:28:14,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 117 2024-11-15T08:28:14,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-15T08:28:14,296 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:28:14,297 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:28:14,299 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:28:14,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742104_1280 (size=216) 2024-11-15T08:28:14,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742104_1280 (size=216) 2024-11-15T08:28:14,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742104_1280 (size=216) 2024-11-15T08:28:14,306 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:28:14,306 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
586daedb5286d7e2974fc4cbcd969247}] 2024-11-15T08:28:14,307 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 586daedb5286d7e2974fc4cbcd969247 2024-11-15T08:28:14,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-15T08:28:14,458 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40637 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=118 2024-11-15T08:28:14,459 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247. 2024-11-15T08:28:14,459 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.HRegion(2603): Flush status journal for 586daedb5286d7e2974fc4cbcd969247: 2024-11-15T08:28:14,459 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-11-15T08:28:14,459 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:14,459 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:14,459 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247/cf/9a30f2663d0e42828ed8a518b9cb06cc.c4d277bef84f42d17e48f1bbacdd433a->hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a/cf/9a30f2663d0e42828ed8a518b9cb06cc-top, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247/cf/e18fdb147d174f0bb78bca8fdc751d1c.8ddb12e457b72ccff7a497a03db1cc4e->hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e/cf/e18fdb147d174f0bb78bca8fdc751d1c-top] hfiles 2024-11-15T08:28:14,459 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247/cf/9a30f2663d0e42828ed8a518b9cb06cc.c4d277bef84f42d17e48f1bbacdd433a for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:14,460 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247/cf/e18fdb147d174f0bb78bca8fdc751d1c.8ddb12e457b72ccff7a497a03db1cc4e for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:14,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742105_1281 (size=269) 2024-11-15T08:28:14,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742105_1281 (size=269) 2024-11-15T08:28:14,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742105_1281 (size=269) 2024-11-15T08:28:14,467 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247. 2024-11-15T08:28:14,467 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=118}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=118 2024-11-15T08:28:14,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=118 2024-11-15T08:28:14,467 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 586daedb5286d7e2974fc4cbcd969247 2024-11-15T08:28:14,467 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=118, ppid=117, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 586daedb5286d7e2974fc4cbcd969247 2024-11-15T08:28:14,470 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=117 2024-11-15T08:28:14,470 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=117, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 586daedb5286d7e2974fc4cbcd969247 in 162 msec 2024-11-15T08:28:14,470 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:28:14,471 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 
} execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:28:14,471 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:28:14,471 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:14,472 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:14,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742106_1282 (size=670) 2024-11-15T08:28:14,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742106_1282 (size=670) 2024-11-15T08:28:14,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742106_1282 (size=670) 2024-11-15T08:28:14,481 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:28:14,486 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:28:14,486 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:14,487 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=117, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:28:14,487 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 117 2024-11-15T08:28:14,488 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=117, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 
table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 193 msec 2024-11-15T08:28:14,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=117 2024-11-15T08:28:14,616 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-15T08:28:14,616 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659294616 2024-11-15T08:28:14,617 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:44023, tgtDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659294616, rawTgtDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659294616, srcFsUri=hdfs://localhost:44023, srcDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:14,647 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44023, inputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:14,647 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659294616, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659294616/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:14,649 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 
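[editor's note] From here the log follows ExportSnapshot: verifying the source snapshot, copying its manifest to the export destination, and staging job jars via TableMapReduceUtil for the MapReduce copy. A hedged sketch of driving the same tool programmatically via ToolRunner; the destination path is copied from the export-test directory logged above, and the mapper count is an arbitrary placeholder.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Equivalent to the documented CLI form:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot -snapshot <name> -copy-to <uri>
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
            "-copy-to",
            "hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659294616",
            "-mappers", "1"
        });
        System.exit(rc);
      }
    }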
2024-11-15T08:28:14,654 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659294616/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:14,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742107_1283 (size=216) 2024-11-15T08:28:14,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742108_1284 (size=670) 2024-11-15T08:28:14,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742108_1284 (size=670) 2024-11-15T08:28:14,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742107_1283 (size=216) 2024-11-15T08:28:14,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742107_1283 (size=216) 2024-11-15T08:28:14,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742108_1284 (size=670) 2024-11-15T08:28:14,669 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:14,670 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:14,670 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:14,710 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:14,711 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-11-15T08:28:14,711 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:14,711 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-11-15T08:28:14,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-15T08:28:15,063 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0005_000001 (auth:SIMPLE) from 127.0.0.1:46810 2024-11-15T08:28:15,074 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_0/usercache/jenkins/appcache/application_1731659181416_0005/container_1731659181416_0005_01_000001/launch_container.sh] 2024-11-15T08:28:15,074 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_0/usercache/jenkins/appcache/application_1731659181416_0005/container_1731659181416_0005_01_000001/container_tokens] 2024-11-15T08:28:15,074 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_0/usercache/jenkins/appcache/application_1731659181416_0005/container_1731659181416_0005_01_000001/sysfs] 2024-11-15T08:28:15,738 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-11245606495321836870.jar 2024-11-15T08:28:15,739 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:15,739 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:15,807 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-5482455354915585857.jar 2024-11-15T08:28:15,808 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:15,808 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:15,808 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:15,808 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:15,809 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:15,809 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:15,809 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T08:28:15,809 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T08:28:15,809 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T08:28:15,810 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-15T08:28:15,810 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T08:28:15,810 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T08:28:15,810 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T08:28:15,810 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-15T08:28:15,811 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For 
class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T08:28:15,811 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T08:28:15,811 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T08:28:15,811 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:28:15,811 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:28:15,812 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:28:15,812 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:28:15,812 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:28:15,812 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:28:15,812 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:28:15,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742109_1285 (size=131440) 2024-11-15T08:28:15,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742109_1285 (size=131440) 2024-11-15T08:28:15,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742109_1285 (size=131440) 2024-11-15T08:28:15,890 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742110_1286 (size=4188619) 2024-11-15T08:28:15,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742110_1286 (size=4188619) 2024-11-15T08:28:15,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742110_1286 (size=4188619) 2024-11-15T08:28:15,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742111_1287 (size=1323991) 2024-11-15T08:28:15,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742111_1287 (size=1323991) 2024-11-15T08:28:15,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742111_1287 (size=1323991) 2024-11-15T08:28:15,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742112_1288 (size=903738) 2024-11-15T08:28:15,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742112_1288 (size=903738) 2024-11-15T08:28:15,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742112_1288 (size=903738) 2024-11-15T08:28:15,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742113_1289 (size=8360083) 2024-11-15T08:28:15,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742113_1289 (size=8360083) 2024-11-15T08:28:15,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742113_1289 (size=8360083) 2024-11-15T08:28:15,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742114_1290 (size=1877034) 2024-11-15T08:28:15,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742114_1290 (size=1877034) 2024-11-15T08:28:15,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742114_1290 (size=1877034) 2024-11-15T08:28:15,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742115_1291 (size=77835) 2024-11-15T08:28:15,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742115_1291 (size=77835) 2024-11-15T08:28:15,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742115_1291 (size=77835) 2024-11-15T08:28:15,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742116_1292 (size=30949) 2024-11-15T08:28:15,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742116_1292 (size=30949) 2024-11-15T08:28:15,993 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742116_1292 (size=30949) 2024-11-15T08:28:16,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742117_1293 (size=1597327) 2024-11-15T08:28:16,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742117_1293 (size=1597327) 2024-11-15T08:28:16,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742117_1293 (size=1597327) 2024-11-15T08:28:16,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742118_1294 (size=6424751) 2024-11-15T08:28:16,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742118_1294 (size=6424751) 2024-11-15T08:28:16,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742118_1294 (size=6424751) 2024-11-15T08:28:16,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742119_1295 (size=4695811) 2024-11-15T08:28:16,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742119_1295 (size=4695811) 2024-11-15T08:28:16,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742119_1295 (size=4695811) 2024-11-15T08:28:16,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742120_1296 (size=232957) 2024-11-15T08:28:16,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742120_1296 (size=232957) 2024-11-15T08:28:16,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742120_1296 (size=232957) 2024-11-15T08:28:16,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742121_1297 (size=127628) 2024-11-15T08:28:16,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742121_1297 (size=127628) 2024-11-15T08:28:16,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742121_1297 (size=127628) 2024-11-15T08:28:16,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742122_1298 (size=20406) 2024-11-15T08:28:16,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742122_1298 (size=20406) 2024-11-15T08:28:16,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742122_1298 (size=20406) 2024-11-15T08:28:16,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742123_1299 (size=5175431) 2024-11-15T08:28:16,094 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742123_1299 (size=5175431) 2024-11-15T08:28:16,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742123_1299 (size=5175431) 2024-11-15T08:28:16,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742124_1300 (size=440656) 2024-11-15T08:28:16,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742124_1300 (size=440656) 2024-11-15T08:28:16,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742124_1300 (size=440656) 2024-11-15T08:28:16,328 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T08:28:16,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742125_1301 (size=217634) 2024-11-15T08:28:16,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742125_1301 (size=217634) 2024-11-15T08:28:16,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742125_1301 (size=217634) 2024-11-15T08:28:16,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742126_1302 (size=1832290) 2024-11-15T08:28:16,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742126_1302 (size=1832290) 2024-11-15T08:28:16,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742126_1302 (size=1832290) 2024-11-15T08:28:16,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742127_1303 (size=322274) 2024-11-15T08:28:16,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742127_1303 (size=322274) 2024-11-15T08:28:16,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742127_1303 (size=322274) 2024-11-15T08:28:16,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742128_1304 (size=503880) 2024-11-15T08:28:16,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742128_1304 (size=503880) 2024-11-15T08:28:16,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742128_1304 (size=503880) 2024-11-15T08:28:16,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742129_1305 (size=29229) 2024-11-15T08:28:16,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742129_1305 (size=29229) 
2024-11-15T08:28:16,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742129_1305 (size=29229) 2024-11-15T08:28:16,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742130_1306 (size=24096) 2024-11-15T08:28:16,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742130_1306 (size=24096) 2024-11-15T08:28:16,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742130_1306 (size=24096) 2024-11-15T08:28:16,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742131_1307 (size=111872) 2024-11-15T08:28:16,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742131_1307 (size=111872) 2024-11-15T08:28:16,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742131_1307 (size=111872) 2024-11-15T08:28:16,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742132_1308 (size=45609) 2024-11-15T08:28:16,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742132_1308 (size=45609) 2024-11-15T08:28:16,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742132_1308 (size=45609) 2024-11-15T08:28:16,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742133_1309 (size=136454) 2024-11-15T08:28:16,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742133_1309 (size=136454) 2024-11-15T08:28:16,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742133_1309 (size=136454) 2024-11-15T08:28:16,635 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
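The TableMapReduceUtil(972) entries above record dependency jars being resolved from the local repository for the export MapReduce job, and the JobResourceUploader warning notes that the test never sets a job jar. A minimal, hypothetical sketch of the client-side job setup those entries correspond to; the class name ExportJobSetup and the job name are placeholders, not values from this log:

  // Hypothetical driver setup matching the jar-resolution entries above (sketch only).
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
  import org.apache.hadoop.mapreduce.Job;

  public class ExportJobSetup {
    public static Job newJob(Configuration base) throws Exception {
      Configuration conf = HBaseConfiguration.create(base);
      Job job = Job.getInstance(conf, "snapshot-export-sketch");
      // Setting the job jar is what the "No job jar file set" warning above is about.
      job.setJarByClass(ExportJobSetup.class);
      // Ships the job's dependency jars (HBase, Hadoop, OpenTelemetry, ...) with the job,
      // which is the resolution work TableMapReduceUtil(972) is logging above.
      TableMapReduceUtil.addDependencyJars(job);
      return job;
    }
  }
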
2024-11-15T08:28:16,637 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-11-15T08:28:16,639 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=9.7 K 2024-11-15T08:28:16,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742134_1310 (size=378) 2024-11-15T08:28:16,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742134_1310 (size=378) 2024-11-15T08:28:16,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742134_1310 (size=378) 2024-11-15T08:28:16,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742135_1311 (size=15) 2024-11-15T08:28:16,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742135_1311 (size=15) 2024-11-15T08:28:16,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742135_1311 (size=15) 2024-11-15T08:28:16,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742136_1312 (size=303791) 2024-11-15T08:28:16,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742136_1312 (size=303791) 2024-11-15T08:28:16,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742136_1312 (size=303791) 2024-11-15T08:28:16,679 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T08:28:16,679 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T08:28:16,980 WARN [regionserver/48e8ad8dc936:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 5, running: 1 2024-11-15T08:28:17,064 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0006_000001 (auth:SIMPLE) from 127.0.0.1:46826 2024-11-15T08:28:17,904 DEBUG [master/48e8ad8dc936:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region cf05b1d7d16bcb282753fe29aabe0370 changed from -1.0 to 0.0, refreshing cache 2024-11-15T08:28:17,904 DEBUG [master/48e8ad8dc936:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region a69cd0742c8d38c1cc58de125fa4e238 changed from -1.0 to 0.0, refreshing cache 2024-11-15T08:28:22,968 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0006_000001 (auth:SIMPLE) from 127.0.0.1:49594 2024-11-15T08:28:23,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742137_1313 (size=349441) 2024-11-15T08:28:23,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742137_1313 (size=349441) 2024-11-15T08:28:23,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742137_1313 (size=349441) 2024-11-15T08:28:25,214 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0006_000001 (auth:SIMPLE) from 127.0.0.1:42124 2024-11-15T08:28:28,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742138_1314 (size=4945) 2024-11-15T08:28:28,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742138_1314 (size=4945) 2024-11-15T08:28:28,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742138_1314 (size=4945) 2024-11-15T08:28:28,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742139_1315 (size=4945) 2024-11-15T08:28:28,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742139_1315 (size=4945) 2024-11-15T08:28:28,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742139_1315 (size=4945) 2024-11-15T08:28:28,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742140_1316 (size=17474) 2024-11-15T08:28:28,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742140_1316 (size=17474) 2024-11-15T08:28:28,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742140_1316 (size=17474) 2024-11-15T08:28:28,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742141_1317 (size=482) 2024-11-15T08:28:28,732 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742141_1317 (size=482) 2024-11-15T08:28:28,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742141_1317 (size=482) 2024-11-15T08:28:28,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742142_1318 (size=17474) 2024-11-15T08:28:28,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742142_1318 (size=17474) 2024-11-15T08:28:28,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742142_1318 (size=17474) 2024-11-15T08:28:28,799 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_2/usercache/jenkins/appcache/application_1731659181416_0006/container_1731659181416_0006_01_000002/launch_container.sh] 2024-11-15T08:28:28,799 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_2/usercache/jenkins/appcache/application_1731659181416_0006/container_1731659181416_0006_01_000002/container_tokens] 2024-11-15T08:28:28,799 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_2/usercache/jenkins/appcache/application_1731659181416_0006/container_1731659181416_0006_01_000002/sysfs] 2024-11-15T08:28:28,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742143_1319 (size=349441) 2024-11-15T08:28:28,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742143_1319 (size=349441) 2024-11-15T08:28:28,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742143_1319 (size=349441) 2024-11-15T08:28:28,815 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0006_000001 (auth:SIMPLE) from 127.0.0.1:42138 2024-11-15T08:28:30,011 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-15T08:28:30,012 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
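The ExportSnapshot(1210/1221) entries here mark the finalize-and-verify phase of the snapshot export performed by the MapReduce job above. A minimal sketch of how such an export is typically launched; the snapshot name and destination root are taken from the surrounding entries, while the wrapper class name and mapper count are illustrative assumptions:

  // Illustrative launch of the ExportSnapshot tool (sketch, not the test's exact code path).
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
  import org.apache.hadoop.util.ToolRunner;

  public class ExportSnapshotSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
          "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
          // Destination root as listed in the verification entries below.
          "-copy-to", "hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659294616",
          "-mappers", "1"   // assumed value; the log reports a single split of ~9.7 K
      });
      System.exit(rc);
    }
  }
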
2024-11-15T08:28:30,019 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,019 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-15T08:28:30,019 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-15T08:28:30,019 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,020 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-15T08:28:30,020 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-15T08:28:30,020 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659294616/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659294616/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,020 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659294616/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-15T08:28:30,020 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659294616/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-15T08:28:30,026 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-15T08:28:30,029 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659310029"}]},"ts":"1731659310029"} 2024-11-15T08:28:30,031 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated 
tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-11-15T08:28:30,031 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-11-15T08:28:30,031 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-11-15T08:28:30,033 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=586daedb5286d7e2974fc4cbcd969247, UNASSIGN}] 2024-11-15T08:28:30,033 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=586daedb5286d7e2974fc4cbcd969247, UNASSIGN 2024-11-15T08:28:30,034 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=121 updating hbase:meta row=586daedb5286d7e2974fc4cbcd969247, regionState=CLOSING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:28:30,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=121, ppid=120, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=586daedb5286d7e2974fc4cbcd969247, UNASSIGN because future has completed 2024-11-15T08:28:30,036 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:28:30,036 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE, hasLock=false; CloseRegionProcedure 586daedb5286d7e2974fc4cbcd969247, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:28:30,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-15T08:28:30,188 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(122): Close 586daedb5286d7e2974fc4cbcd969247 2024-11-15T08:28:30,188 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:28:30,188 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1722): Closing 586daedb5286d7e2974fc4cbcd969247, disabling compactions & flushes 2024-11-15T08:28:30,188 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247. 2024-11-15T08:28:30,188 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247. 
2024-11-15T08:28:30,188 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247. after waiting 0 ms 2024-11-15T08:28:30,188 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247. 2024-11-15T08:28:30,193 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-11-15T08:28:30,193 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:28:30,194 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247. 2024-11-15T08:28:30,194 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] regionserver.HRegion(1676): Region close journal for 586daedb5286d7e2974fc4cbcd969247: Waiting for close lock at 1731659310188Running coprocessor pre-close hooks at 1731659310188Disabling compacts and flushes for region at 1731659310188Disabling writes for close at 1731659310188Writing region close event to WAL at 1731659310189 (+1 ms)Running coprocessor post-close hooks at 1731659310193 (+4 ms)Closed at 1731659310194 (+1 ms) 2024-11-15T08:28:30,195 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=122}] handler.UnassignRegionHandler(157): Closed 586daedb5286d7e2974fc4cbcd969247 2024-11-15T08:28:30,196 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=121 updating hbase:meta row=586daedb5286d7e2974fc4cbcd969247, regionState=CLOSED 2024-11-15T08:28:30,197 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=122, ppid=121, state=RUNNABLE, hasLock=false; CloseRegionProcedure 586daedb5286d7e2974fc4cbcd969247, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:28:30,200 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=122, resume processing ppid=121 2024-11-15T08:28:30,200 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, ppid=121, state=SUCCESS, hasLock=false; CloseRegionProcedure 586daedb5286d7e2974fc4cbcd969247, server=48e8ad8dc936,40637,1731659175269 in 162 msec 2024-11-15T08:28:30,201 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=120 2024-11-15T08:28:30,201 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=120, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=586daedb5286d7e2974fc4cbcd969247, UNASSIGN in 167 msec 2024-11-15T08:28:30,204 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=120, resume processing ppid=119 2024-11-15T08:28:30,204 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 171 msec 2024-11-15T08:28:30,205 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659310205"}]},"ts":"1731659310205"} 2024-11-15T08:28:30,207 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-11-15T08:28:30,207 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-11-15T08:28:30,210 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 182 msec 2024-11-15T08:28:30,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-15T08:28:30,346 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-15T08:28:30,347 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=123, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,349 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=123, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,349 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=123, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,352 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,353 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247 2024-11-15T08:28:30,353 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a 2024-11-15T08:28:30,353 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e 2024-11-15T08:28:30,355 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a/recovered.edits] 2024-11-15T08:28:30,355 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e/recovered.edits] 2024-11-15T08:28:30,355 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247/recovered.edits] 2024-11-15T08:28:30,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,357 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-15T08:28:30,357 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-15T08:28:30,357 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-15T08:28:30,357 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-15T08:28:30,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:30,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:30,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:30,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:30,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=123 2024-11-15T08:28:30,360 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:30,360 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:30,360 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:30,361 DEBUG [HFileArchiver-12 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a/cf/9a30f2663d0e42828ed8a518b9cb06cc to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a/cf/9a30f2663d0e42828ed8a518b9cb06cc 2024-11-15T08:28:30,361 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:30,361 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e/cf/e18fdb147d174f0bb78bca8fdc751d1c to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e/cf/e18fdb147d174f0bb78bca8fdc751d1c 2024-11-15T08:28:30,361 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247/cf/9a30f2663d0e42828ed8a518b9cb06cc.c4d277bef84f42d17e48f1bbacdd433a to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247/cf/9a30f2663d0e42828ed8a518b9cb06cc.c4d277bef84f42d17e48f1bbacdd433a 2024-11-15T08:28:30,363 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247/cf/e18fdb147d174f0bb78bca8fdc751d1c.8ddb12e457b72ccff7a497a03db1cc4e to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247/cf/e18fdb147d174f0bb78bca8fdc751d1c.8ddb12e457b72ccff7a497a03db1cc4e 2024-11-15T08:28:30,364 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a/recovered.edits/8.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a/recovered.edits/8.seqid 2024-11-15T08:28:30,365 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/c4d277bef84f42d17e48f1bbacdd433a 2024-11-15T08:28:30,365 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e/recovered.edits/8.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e/recovered.edits/8.seqid 2024-11-15T08:28:30,366 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8ddb12e457b72ccff7a497a03db1cc4e 2024-11-15T08:28:30,367 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247/recovered.edits/12.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247/recovered.edits/12.seqid 2024-11-15T08:28:30,367 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/586daedb5286d7e2974fc4cbcd969247 2024-11-15T08:28:30,367 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-11-15T08:28:30,369 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=123, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,373 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-11-15T08:28:30,375 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-11-15T08:28:30,376 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=123, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,376 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-11-15T08:28:30,376 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659310376"}]},"ts":"9223372036854775807"} 2024-11-15T08:28:30,378 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-15T08:28:30,378 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 586daedb5286d7e2974fc4cbcd969247, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247.', STARTKEY => '', ENDKEY => ''}] 2024-11-15T08:28:30,378 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 
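The DisableTableProcedure and DeleteTableProcedure entries around this point are the master-side execution of two client Admin calls. A minimal, hypothetical sketch of that client side, with connection handling simplified; the class name DropTableSketch is a placeholder, while the table name and procedure ids come from this log:

  // Client-side counterpart (sketch) of the disable/delete procedures logged here.
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class DropTableSketch {
    public static void main(String[] args) throws Exception {
      TableName table =
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Admin admin = conn.getAdmin()) {
        admin.disableTable(table);  // drives DisableTableProcedure (pid=119 in this log)
        admin.deleteTable(table);   // drives DeleteTableProcedure (pid=123 in this log)
      }
    }
  }
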
2024-11-15T08:28:30,378 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731659310378"}]},"ts":"9223372036854775807"} 2024-11-15T08:28:30,380 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-11-15T08:28:30,381 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=123, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,382 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 34 msec 2024-11-15T08:28:30,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=123 2024-11-15T08:28:30,467 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,467 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-15T08:28:30,467 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-15T08:28:30,470 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659310470"}]},"ts":"1731659310470"} 2024-11-15T08:28:30,472 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-11-15T08:28:30,472 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-11-15T08:28:30,473 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-11-15T08:28:30,474 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cf05b1d7d16bcb282753fe29aabe0370, UNASSIGN}, {pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a69cd0742c8d38c1cc58de125fa4e238, UNASSIGN}] 2024-11-15T08:28:30,475 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cf05b1d7d16bcb282753fe29aabe0370, UNASSIGN 2024-11-15T08:28:30,475 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a69cd0742c8d38c1cc58de125fa4e238, UNASSIGN 2024-11-15T08:28:30,476 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=126 updating hbase:meta row=cf05b1d7d16bcb282753fe29aabe0370, regionState=CLOSING, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:28:30,476 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=a69cd0742c8d38c1cc58de125fa4e238, regionState=CLOSING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:28:30,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a69cd0742c8d38c1cc58de125fa4e238, UNASSIGN because future has completed 2024-11-15T08:28:30,477 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:28:30,478 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=128, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure a69cd0742c8d38c1cc58de125fa4e238, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:28:30,478 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cf05b1d7d16bcb282753fe29aabe0370, UNASSIGN because future has completed 2024-11-15T08:28:30,478 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:28:30,478 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=126, state=RUNNABLE, hasLock=false; CloseRegionProcedure cf05b1d7d16bcb282753fe29aabe0370, server=48e8ad8dc936,37511,1731659175104}] 2024-11-15T08:28:30,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-15T08:28:30,630 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(122): Close a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:30,630 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:28:30,630 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1722): Closing a69cd0742c8d38c1cc58de125fa4e238, disabling compactions & flushes 2024-11-15T08:28:30,630 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1755): 
Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. 2024-11-15T08:28:30,630 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. 2024-11-15T08:28:30,630 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. after waiting 0 ms 2024-11-15T08:28:30,630 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. 2024-11-15T08:28:30,632 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:30,632 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:28:30,632 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing cf05b1d7d16bcb282753fe29aabe0370, disabling compactions & flushes 2024-11-15T08:28:30,632 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. 2024-11-15T08:28:30,632 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. 2024-11-15T08:28:30,632 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. after waiting 0 ms 2024-11-15T08:28:30,632 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. 2024-11-15T08:28:30,635 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T08:28:30,635 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:28:30,635 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238. 
2024-11-15T08:28:30,636 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] regionserver.HRegion(1676): Region close journal for a69cd0742c8d38c1cc58de125fa4e238: Waiting for close lock at 1731659310630Running coprocessor pre-close hooks at 1731659310630Disabling compacts and flushes for region at 1731659310630Disabling writes for close at 1731659310630Writing region close event to WAL at 1731659310631 (+1 ms)Running coprocessor post-close hooks at 1731659310635 (+4 ms)Closed at 1731659310635 2024-11-15T08:28:30,637 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T08:28:30,637 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:28:30,637 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370. 2024-11-15T08:28:30,637 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for cf05b1d7d16bcb282753fe29aabe0370: Waiting for close lock at 1731659310632Running coprocessor pre-close hooks at 1731659310632Disabling compacts and flushes for region at 1731659310632Disabling writes for close at 1731659310632Writing region close event to WAL at 1731659310633 (+1 ms)Running coprocessor post-close hooks at 1731659310637 (+4 ms)Closed at 1731659310637 2024-11-15T08:28:30,638 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=128}] handler.UnassignRegionHandler(157): Closed a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:30,639 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=a69cd0742c8d38c1cc58de125fa4e238, regionState=CLOSED 2024-11-15T08:28:30,639 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:30,639 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=126 updating hbase:meta row=cf05b1d7d16bcb282753fe29aabe0370, regionState=CLOSED 2024-11-15T08:28:30,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure a69cd0742c8d38c1cc58de125fa4e238, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:28:30,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=126, state=RUNNABLE, hasLock=false; CloseRegionProcedure cf05b1d7d16bcb282753fe29aabe0370, server=48e8ad8dc936,37511,1731659175104 because future has completed 2024-11-15T08:28:30,643 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=128, resume processing ppid=127 2024-11-15T08:28:30,644 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=127, state=SUCCESS, 
hasLock=false; CloseRegionProcedure a69cd0742c8d38c1cc58de125fa4e238, server=48e8ad8dc936,40637,1731659175269 in 164 msec 2024-11-15T08:28:30,644 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=126 2024-11-15T08:28:30,644 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=126, state=SUCCESS, hasLock=false; CloseRegionProcedure cf05b1d7d16bcb282753fe29aabe0370, server=48e8ad8dc936,37511,1731659175104 in 164 msec 2024-11-15T08:28:30,645 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=125, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a69cd0742c8d38c1cc58de125fa4e238, UNASSIGN in 170 msec 2024-11-15T08:28:30,646 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-11-15T08:28:30,646 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=cf05b1d7d16bcb282753fe29aabe0370, UNASSIGN in 170 msec 2024-11-15T08:28:30,648 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=125, resume processing ppid=124 2024-11-15T08:28:30,648 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, ppid=124, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 174 msec 2024-11-15T08:28:30,649 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659310649"}]},"ts":"1731659310649"} 2024-11-15T08:28:30,651 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-11-15T08:28:30,651 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-11-15T08:28:30,666 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 197 msec 2024-11-15T08:28:30,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=124 2024-11-15T08:28:30,787 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-15T08:28:30,787 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,789 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=130, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,789 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,790 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=130, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,793 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,794 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:30,794 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:30,796 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238/recovered.edits] 2024-11-15T08:28:30,796 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370/recovered.edits] 2024-11-15T08:28:30,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,798 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-15T08:28:30,798 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-15T08:28:30,798 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-15T08:28:30,798 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-15T08:28:30,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:30,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:30,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:30,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,800 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:30,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=130 2024-11-15T08:28:30,802 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238/cf/fbccc0953e904dccbb9994749bd2a532 to 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238/cf/fbccc0953e904dccbb9994749bd2a532 2024-11-15T08:28:30,805 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238/recovered.edits/9.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238/recovered.edits/9.seqid 2024-11-15T08:28:30,805 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370/cf/af6d47f8f19541e4ab91217fa00116f5 to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370/cf/af6d47f8f19541e4ab91217fa00116f5 2024-11-15T08:28:30,805 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/a69cd0742c8d38c1cc58de125fa4e238 2024-11-15T08:28:30,808 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370/recovered.edits/9.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370/recovered.edits/9.seqid 2024-11-15T08:28:30,808 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithMergeRegion/cf05b1d7d16bcb282753fe29aabe0370 2024-11-15T08:28:30,809 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-11-15T08:28:30,811 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=130, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,813 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-11-15T08:28:30,816 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-11-15T08:28:30,817 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=130, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,817 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 
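
Note: the HFileArchiver and DeleteTableProcedure entries in this stretch show the table's region directories being moved under archive/, its rows removed from hbase:meta, and its descriptor dropped; the MasterRpcServices "delete name: ..." entries that follow remove the test's snapshots. A hedged client-side equivalent, assuming an Admin handle obtained as in the earlier sketch (snapshot names are taken verbatim from the log):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    class DropTableAndSnapshots {
        static void run(Admin admin) throws IOException {
            TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
            // The table must already be disabled before it can be deleted.
            admin.deleteTable(table);
            // Corresponds to the snapshot deletion requests logged below.
            admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion");
            admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion");
            admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1");
        }
    }
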
2024-11-15T08:28:30,817 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659310817"}]},"ts":"9223372036854775807"} 2024-11-15T08:28:30,817 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659310817"}]},"ts":"9223372036854775807"} 2024-11-15T08:28:30,819 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T08:28:30,819 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => cf05b1d7d16bcb282753fe29aabe0370, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1731659291118.cf05b1d7d16bcb282753fe29aabe0370.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => a69cd0742c8d38c1cc58de125fa4e238, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1731659291118.a69cd0742c8d38c1cc58de125fa4e238.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T08:28:30,819 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 2024-11-15T08:28:30,819 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731659310819"}]},"ts":"9223372036854775807"} 2024-11-15T08:28:30,821 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-11-15T08:28:30,822 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=130, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,823 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 34 msec 2024-11-15T08:28:30,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=130 2024-11-15T08:28:30,906 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,906 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-15T08:28:30,914 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-15T08:28:30,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,917 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-15T08:28:30,919 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:30,920 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-11-15T08:28:30,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:30,945 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=798 (was 788) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/48e8ad8dc936:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4400 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-872955243_1 at /127.0.0.1:55650 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2123625332_22 at /127.0.0.1:45912 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/48e8ad8dc936:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2123625332_22 at /127.0.0.1:55674 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-872955243_1 at /127.0.0.1:41084 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC 
Parameter Sending Thread for localhost/127.0.0.1:37413 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2123625332_22 at /127.0.0.1:45302 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 18830) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:37413 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=803 (was 801) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=417 (was 430), ProcessCount=19 (was 19), AvailableMemoryMB=724 (was 758) 2024-11-15T08:28:30,945 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-11-15T08:28:30,963 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=798, OpenFileDescriptor=803, MaxFileDescriptor=1048576, SystemLoadAverage=417, ProcessCount=19, AvailableMemoryMB=724 2024-11-15T08:28:30,963 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-11-15T08:28:30,965 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T08:28:30,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-15T08:28:30,967 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T08:28:30,967 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:30,967 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 131 2024-11-15T08:28:30,968 INFO [PEWorker-2 {}] 
procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T08:28:30,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-15T08:28:30,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742144_1320 (size=407) 2024-11-15T08:28:30,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742144_1320 (size=407) 2024-11-15T08:28:30,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742144_1320 (size=407) 2024-11-15T08:28:30,976 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 772ec6a98743eb4d2af5ad780441089c, NAME => 'testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:30,976 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 69495864998cb8406b5e6f26788cdc60, NAME => 'testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:30,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742145_1321 (size=68) 2024-11-15T08:28:30,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742145_1321 (size=68) 2024-11-15T08:28:30,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742146_1322 (size=68) 2024-11-15T08:28:30,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742146_1322 (size=68) 2024-11-15T08:28:30,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742145_1321 (size=68) 2024-11-15T08:28:30,984 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742146_1322 (size=68) 2024-11-15T08:28:30,984 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:30,984 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:30,984 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 69495864998cb8406b5e6f26788cdc60, disabling compactions & flushes 2024-11-15T08:28:30,984 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 772ec6a98743eb4d2af5ad780441089c, disabling compactions & flushes 2024-11-15T08:28:30,984 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. 2024-11-15T08:28:30,984 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. 2024-11-15T08:28:30,984 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. 2024-11-15T08:28:30,984 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. 2024-11-15T08:28:30,984 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. after waiting 0 ms 2024-11-15T08:28:30,984 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. after waiting 0 ms 2024-11-15T08:28:30,984 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. 2024-11-15T08:28:30,984 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. 2024-11-15T08:28:30,984 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. 2024-11-15T08:28:30,984 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. 
2024-11-15T08:28:30,985 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 772ec6a98743eb4d2af5ad780441089c: Waiting for close lock at 1731659310984Disabling compacts and flushes for region at 1731659310984Disabling writes for close at 1731659310984Writing region close event to WAL at 1731659310984Closed at 1731659310984 2024-11-15T08:28:30,985 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 69495864998cb8406b5e6f26788cdc60: Waiting for close lock at 1731659310984Disabling compacts and flushes for region at 1731659310984Disabling writes for close at 1731659310984Writing region close event to WAL at 1731659310984Closed at 1731659310984 2024-11-15T08:28:30,985 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T08:28:30,986 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731659310985"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659310985"}]},"ts":"1731659310985"} 2024-11-15T08:28:30,986 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1731659310985"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659310985"}]},"ts":"1731659310985"} 2024-11-15T08:28:30,988 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
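
Note: pid=131 is a CreateTableProcedure for testtb-testExportExpiredSnapshot; it writes the filesystem layout for two regions split at row key '1' (STARTKEY ''/ENDKEY '1' and STARTKEY '1'/ENDKEY ''), adds both to hbase:meta, and then moves on to assignment. A sketch of the client-side request that would produce this, assuming the same Admin handle as before and default column-family settings for 'cf' (the test's actual descriptor may set more attributes than shown):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class CreateExpiredSnapshotTable {
        static void run(Admin admin) throws IOException {
            TableDescriptor desc = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                    .build();
            // A single split point at '1' yields the two regions seen in the log.
            admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
    }
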
2024-11-15T08:28:30,988 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T08:28:30,989 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659310989"}]},"ts":"1731659310989"} 2024-11-15T08:28:30,990 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-15T08:28:30,990 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {48e8ad8dc936=0} racks are {/default-rack=0} 2024-11-15T08:28:30,991 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T08:28:30,991 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T08:28:30,991 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T08:28:30,991 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T08:28:30,991 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T08:28:30,991 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T08:28:30,991 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T08:28:30,991 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T08:28:30,991 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T08:28:30,991 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T08:28:30,992 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=772ec6a98743eb4d2af5ad780441089c, ASSIGN}, {pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=69495864998cb8406b5e6f26788cdc60, ASSIGN}] 2024-11-15T08:28:30,992 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=69495864998cb8406b5e6f26788cdc60, ASSIGN 2024-11-15T08:28:30,993 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=772ec6a98743eb4d2af5ad780441089c, ASSIGN 2024-11-15T08:28:30,993 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=69495864998cb8406b5e6f26788cdc60, ASSIGN; state=OFFLINE, location=48e8ad8dc936,40637,1731659175269; forceNewPlan=false, retain=false 2024-11-15T08:28:30,993 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=132, ppid=131, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=772ec6a98743eb4d2af5ad780441089c, ASSIGN; state=OFFLINE, location=48e8ad8dc936,34555,1731659175344; forceNewPlan=false, retain=false 2024-11-15T08:28:31,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-15T08:28:31,144 INFO [48e8ad8dc936:35169 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-15T08:28:31,144 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=132 updating hbase:meta row=772ec6a98743eb4d2af5ad780441089c, regionState=OPENING, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:28:31,144 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=69495864998cb8406b5e6f26788cdc60, regionState=OPENING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:28:31,146 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=132, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=772ec6a98743eb4d2af5ad780441089c, ASSIGN because future has completed 2024-11-15T08:28:31,146 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=134, ppid=132, state=RUNNABLE, hasLock=false; OpenRegionProcedure 772ec6a98743eb4d2af5ad780441089c, server=48e8ad8dc936,34555,1731659175344}] 2024-11-15T08:28:31,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=131, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=69495864998cb8406b5e6f26788cdc60, ASSIGN because future has completed 2024-11-15T08:28:31,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 69495864998cb8406b5e6f26788cdc60, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:28:31,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-15T08:28:31,302 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. 2024-11-15T08:28:31,303 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => 69495864998cb8406b5e6f26788cdc60, NAME => 'testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T08:28:31,303 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. 
service=AccessControlService 2024-11-15T08:28:31,303 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T08:28:31,304 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:31,304 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:31,304 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for 69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:31,304 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for 69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:31,305 INFO [StoreOpener-69495864998cb8406b5e6f26788cdc60-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:31,306 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. 2024-11-15T08:28:31,306 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7752): Opening region: {ENCODED => 772ec6a98743eb4d2af5ad780441089c, NAME => 'testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T08:28:31,306 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. service=AccessControlService 2024-11-15T08:28:31,307 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T08:28:31,307 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:31,307 INFO [StoreOpener-69495864998cb8406b5e6f26788cdc60-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 69495864998cb8406b5e6f26788cdc60 columnFamilyName cf 2024-11-15T08:28:31,307 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:31,307 DEBUG [StoreOpener-69495864998cb8406b5e6f26788cdc60-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:31,307 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7794): checking encryption for 772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:31,307 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(7797): checking classloading for 772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:31,307 INFO [StoreOpener-69495864998cb8406b5e6f26788cdc60-1 {}] regionserver.HStore(327): Store=69495864998cb8406b5e6f26788cdc60/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:28:31,308 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for 69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:31,308 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:31,308 INFO [StoreOpener-772ec6a98743eb4d2af5ad780441089c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:31,309 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:31,309 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for 69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:31,309 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for 69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:31,310 INFO [StoreOpener-772ec6a98743eb4d2af5ad780441089c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 772ec6a98743eb4d2af5ad780441089c columnFamilyName cf 2024-11-15T08:28:31,310 DEBUG [StoreOpener-772ec6a98743eb4d2af5ad780441089c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:31,310 INFO [StoreOpener-772ec6a98743eb4d2af5ad780441089c-1 {}] regionserver.HStore(327): Store=772ec6a98743eb4d2af5ad780441089c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:28:31,310 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1038): replaying wal for 772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:31,311 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for 69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:31,311 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:31,311 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:31,312 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1048): stopping wal replay for 772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:31,312 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1060): Cleaning up temporary data for 772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:31,313 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:28:31,313 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened 69495864998cb8406b5e6f26788cdc60; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68366340, jitterRate=0.018737852573394775}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:28:31,313 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:31,313 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1093): writing seq id for 772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:31,314 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for 69495864998cb8406b5e6f26788cdc60: Running coprocessor pre-open hook at 1731659311304Writing region info on filesystem at 1731659311304Initializing all the Stores at 1731659311305 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659311305Cleaning up temporary data from old regions at 1731659311309 (+4 ms)Running coprocessor post-open hooks at 1731659311313 (+4 ms)Region opened successfully at 1731659311314 (+1 ms) 2024-11-15T08:28:31,315 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60., pid=135, masterSystemTime=1731659311299 2024-11-15T08:28:31,315 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:28:31,315 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1114): Opened 772ec6a98743eb4d2af5ad780441089c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73036978, jitterRate=0.088335782289505}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:28:31,316 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:31,316 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegion(1006): Region open journal for 772ec6a98743eb4d2af5ad780441089c: Running coprocessor pre-open hook at 1731659311307Writing region info on 
filesystem at 1731659311307Initializing all the Stores at 1731659311308 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659311308Cleaning up temporary data from old regions at 1731659311312 (+4 ms)Running coprocessor post-open hooks at 1731659311316 (+4 ms)Region opened successfully at 1731659311316 2024-11-15T08:28:31,316 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c., pid=134, masterSystemTime=1731659311298 2024-11-15T08:28:31,317 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. 2024-11-15T08:28:31,317 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. 2024-11-15T08:28:31,318 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=69495864998cb8406b5e6f26788cdc60, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:28:31,318 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. 2024-11-15T08:28:31,318 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=134}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. 
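At this point both regions are open on their assigned region servers (772ec6a98743eb4d2af5ad780441089c on 48e8ad8dc936,34555 and 69495864998cb8406b5e6f26788cdc60 on 48e8ad8dc936,40637), and the entries that follow persist state=OPEN to hbase:meta. For reference, a client can observe the resulting placement through the RegionLocator API; a minimal sketch, again assuming an open Connection conn:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    final class RegionPlacementSketch {
      static void printRegionPlacement(Connection conn) throws IOException {
        try (RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // e.g. 772ec6a98743eb4d2af5ad780441089c -> 48e8ad8dc936,34555,1731659175344
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }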
2024-11-15T08:28:31,318 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=132 updating hbase:meta row=772ec6a98743eb4d2af5ad780441089c, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:28:31,320 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 69495864998cb8406b5e6f26788cdc60, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:28:31,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE, hasLock=false; OpenRegionProcedure 772ec6a98743eb4d2af5ad780441089c, server=48e8ad8dc936,34555,1731659175344 because future has completed 2024-11-15T08:28:31,323 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=133 2024-11-15T08:28:31,323 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 69495864998cb8406b5e6f26788cdc60, server=48e8ad8dc936,40637,1731659175269 in 174 msec 2024-11-15T08:28:31,324 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=134, resume processing ppid=132 2024-11-15T08:28:31,324 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; OpenRegionProcedure 772ec6a98743eb4d2af5ad780441089c, server=48e8ad8dc936,34555,1731659175344 in 176 msec 2024-11-15T08:28:31,325 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=131, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=69495864998cb8406b5e6f26788cdc60, ASSIGN in 332 msec 2024-11-15T08:28:31,326 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=132, resume processing ppid=131 2024-11-15T08:28:31,326 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, ppid=131, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=772ec6a98743eb4d2af5ad780441089c, ASSIGN in 333 msec 2024-11-15T08:28:31,327 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T08:28:31,327 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659311327"}]},"ts":"1731659311327"} 2024-11-15T08:28:31,328 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-15T08:28:31,329 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=131, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T08:28:31,329 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-11-15T08:28:31,333 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], 
kv [jenkins: RWXCA] 2024-11-15T08:28:31,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:31,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:31,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:31,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:31,338 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:31,338 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:31,338 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:31,338 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:31,341 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 374 msec 2024-11-15T08:28:31,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-15T08:28:31,596 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-15T08:28:31,597 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-11-15T08:28:31,597 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:28:31,600 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 
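The access.PermissionStorage and ZKPermissionWatcher entries above record the table owner 'jenkins' being stored in hbase:acl with RWXCA rights and the update being fanned out to the master and all three region servers through the /hbase/acl znode. With the AccessController coprocessor enabled this happens automatically during CREATE_TABLE_POST_OPERATION; an explicit grant issued through the client API would exercise the same storage and ZooKeeper propagation path. A hedged sketch, assuming the same open Connection conn:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    final class GrantTableRightsSketch {
      // Equivalent of the "jenkins: RWXCA" entry written to hbase:acl above
      // (READ, WRITE, EXEC, CREATE, ADMIN on the whole table, all families/qualifiers).
      static void grantFullTableRights(Connection conn) throws Throwable {
        AccessControlClient.grant(conn,
            TableName.valueOf("testtb-testExportExpiredSnapshot"),
            "jenkins", null, null,
            Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
            Permission.Action.CREATE, Permission.Action.ADMIN);
      }
    }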
2024-11-15T08:28:31,600 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:28:31,600 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-11-15T08:28:31,601 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-15T08:28:31,603 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-15T08:28:31,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659311603 (current time:1731659311603). 2024-11-15T08:28:31,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:28:31,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-15T08:28:31,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:28:31,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e6338fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:31,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:31,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:31,605 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:31,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:31,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:31,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3428696e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:31,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:31,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:31,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:31,607 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37044, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:31,607 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30911a8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:31,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:31,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:31,608 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:31,609 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55674, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:28:31,610 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 2024-11-15T08:28:31,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:31,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:31,611 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
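The snapshot request logged above ({ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }), together with the short-lived connections the master opens around it to validate security and ACLs, is driven by a single Admin call on the client side. A minimal sketch of that call, assuming conn; the test's own snapshot helper is not shown in this log:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    final class TakeSnapshotSketch {
      static void takeFlushSnapshot(Connection conn) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          // Defaults to a FLUSH-type snapshot, matching "type=FLUSH ttl=0" in the request above.
          admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
              TableName.valueOf("testtb-testExportExpiredSnapshot"));
        }
      }
    }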
2024-11-15T08:28:31,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:31,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8b1ffb0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:31,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:31,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:31,612 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:31,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:31,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:31,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e4969bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:31,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:31,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:31,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:31,613 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37064, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:31,614 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76300589, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:31,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:31,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:31,615 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:31,616 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55690, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:28:31,618 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:31,619 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 2024-11-15T08:28:31,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor239.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:31,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:31,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:31,619 INFO 
[Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:28:31,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-15T08:28:31,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T08:28:31,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=136, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-15T08:28:31,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 136 2024-11-15T08:28:31,622 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:28:31,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-15T08:28:31,623 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:28:31,625 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:28:31,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742147_1323 (size=170) 2024-11-15T08:28:31,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742147_1323 (size=170) 2024-11-15T08:28:31,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742147_1323 (size=170) 2024-11-15T08:28:31,633 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:28:31,633 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 772ec6a98743eb4d2af5ad780441089c}, {pid=138, ppid=136, state=RUNNABLE, hasLock=false; 
SnapshotRegionProcedure 69495864998cb8406b5e6f26788cdc60}] 2024-11-15T08:28:31,634 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:31,634 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:31,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-15T08:28:31,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34555 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=137 2024-11-15T08:28:31,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40637 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-11-15T08:28:31,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. 2024-11-15T08:28:31,786 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. 2024-11-15T08:28:31,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.HRegion(2603): Flush status journal for 772ec6a98743eb4d2af5ad780441089c: 2024-11-15T08:28:31,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 69495864998cb8406b5e6f26788cdc60: 2024-11-15T08:28:31,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-15T08:28:31,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-15T08:28:31,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-15T08:28:31,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-15T08:28:31,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:31,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:31,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T08:28:31,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T08:28:31,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742149_1325 (size=71) 2024-11-15T08:28:31,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742149_1325 (size=71) 2024-11-15T08:28:31,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742149_1325 (size=71) 2024-11-15T08:28:31,803 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. 2024-11-15T08:28:31,803 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-11-15T08:28:31,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-11-15T08:28:31,804 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:31,804 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:31,807 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=136, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 69495864998cb8406b5e6f26788cdc60 in 172 msec 2024-11-15T08:28:31,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742148_1324 (size=71) 2024-11-15T08:28:31,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742148_1324 (size=71) 2024-11-15T08:28:31,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742148_1324 (size=71) 2024-11-15T08:28:31,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. 
2024-11-15T08:28:31,811 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-15T08:28:31,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=137 2024-11-15T08:28:31,812 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:31,812 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=137, ppid=136, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:31,815 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=137, resume processing ppid=136 2024-11-15T08:28:31,815 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, ppid=136, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 772ec6a98743eb4d2af5ad780441089c in 180 msec 2024-11-15T08:28:31,815 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:28:31,816 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:28:31,817 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:28:31,817 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-11-15T08:28:31,818 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-11-15T08:28:31,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742150_1326 (size=552) 2024-11-15T08:28:31,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742150_1326 (size=552) 2024-11-15T08:28:31,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742150_1326 (size=552) 2024-11-15T08:28:31,834 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:28:31,839 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:28:31,840 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-11-15T08:28:31,841 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=136, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:28:31,842 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 136 2024-11-15T08:28:31,843 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=136, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 222 msec 2024-11-15T08:28:31,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=136 2024-11-15T08:28:31,936 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-15T08:28:31,940 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='064153d55926a8ca70e15c317bc955748', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c., hostname=48e8ad8dc936,34555,1731659175344, seqNum=2] 2024-11-15T08:28:31,941 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='10aa826290294aaa0f90e965c279294ef', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:31,941 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='2a291fe506abf7b9a051210bd954822b2', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:31,942 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='4b69d299b688f3540cc66d9c1afcf200c', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60., 
hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:31,943 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='349b178336ba12845b45e1c66301bfc88', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:31,944 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='588ed7c3b0c210ef54aa1dbfa6f4b54c9', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:31,944 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='66cca79cdfd6ba9a52d5d5a9172410f07', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:31,945 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34555 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:28:31,947 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40637 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:28:31,949 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-15T08:28:31,950 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-15T08:28:31,951 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. 
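The two HRegion(8528) warnings above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") are what a region server logs when a client sends mutations with write-ahead-log durability turned off. A minimal client-side sketch of such a write, assuming the standard HBase Java client; the table, family, and row key are taken from the log, the value and connection setup are illustrative and not the test's actual code:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      Put put = new Put(Bytes.toBytes("064153d55926a8ca70e15c317bc955748")); // row key seen in the log
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skip the write-ahead log: faster, but the edit is lost if the region server
      // crashes before the memstore is flushed -- hence the warning logged above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```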
2024-11-15T08:28:31,951 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:28:31,953 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-15T08:28:31,959 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-15T08:28:31,966 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-15T08:28:31,970 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-15T08:28:31,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659311970 (current time:1731659311970). 2024-11-15T08:28:31,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:28:31,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-15T08:28:31,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:28:31,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a5b9ba4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:31,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:31,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:31,971 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:31,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:31,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:31,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45766ff2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-15T08:28:31,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:31,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:31,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:31,973 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37094, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:31,974 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@251eabd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:31,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:31,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:31,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:31,976 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55698, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:28:31,977 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 
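The ClusterIdFetcher / ConnectionRegistry entries above trace the handshake a fresh client connection performs: fetch the cluster id from the connection registry, then resolve the hbase:meta region location, then locate user-table regions on demand. From application code all of this is hidden behind creating a Connection; a minimal sketch, assuming the standard client API, with the table name taken from the log and everything else illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectAndLocateSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    // Creating the Connection drives the registry handshake seen in the log
    // (cluster id first, then the hbase:meta region location).
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
      // Locate the region holding a given row, as AsyncNonMetaRegionLocator does above.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("1"));
      System.out.println(loc.getRegion().getEncodedName() + " @ " + loc.getServerName());
    }
  }
}
```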
2024-11-15T08:28:31,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:31,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:31,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:31,977 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:28:31,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49badf57, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:31,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:31,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:31,979 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:31,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:31,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:31,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14ceb16f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:31,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:31,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:31,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:31,980 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37106, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:31,981 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67a74d0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:31,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:31,982 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:31,982 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:31,983 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55700, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:28:31,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:31,986 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 
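The short-lived connection opened above, ending with the lookup of the 'hbase:acl' region, is the master consulting permission storage before accepting the snapshot request (the "Read acl ... [jenkins: RWXCA]" entry that follows). The same ACL data can be read from a client with AccessControlClient; a minimal sketch, assuming the AccessController coprocessor is enabled as in this cluster, with only the table name taken from the log:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ListAclSketch {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Reads the same hbase:acl storage PermissionStorage consults in the log;
      // for this run one would expect an entry like "jenkins: RWXCA".
      for (UserPermission up : AccessControlClient.getUserPermissions(conn, "testtb-testExportExpiredSnapshot")) {
        System.out.println(up);
      }
    }
  }
}
```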
2024-11-15T08:28:31,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor239.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:31,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:31,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:31,987 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:28:31,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-15T08:28:31,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
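At this point the master has validated the request and read the ACLs, and it next stores a SnapshotProcedure for snaptb0-testExportExpiredSnapshot. On the client side a FLUSH snapshot like this one is requested through the Admin API; a minimal sketch, not the test's actual code, with only the snapshot and table names taken from the log:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;

public class FlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // type=FLUSH matches the "type=FLUSH ttl=0" snapshot description in the log;
      // the call blocks until the master-side SnapshotProcedure completes.
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          SnapshotType.FLUSH));
    }
  }
}
```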
2024-11-15T08:28:31,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=139, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-15T08:28:31,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 139 2024-11-15T08:28:31,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-15T08:28:31,989 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:28:31,991 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:28:31,993 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:28:32,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742151_1327 (size=165) 2024-11-15T08:28:32,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742151_1327 (size=165) 2024-11-15T08:28:32,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742151_1327 (size=165) 2024-11-15T08:28:32,011 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:28:32,011 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 772ec6a98743eb4d2af5ad780441089c}, {pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 69495864998cb8406b5e6f26788cdc60}] 2024-11-15T08:28:32,012 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:32,013 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:32,095 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-15T08:28:32,165 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34555 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=140 2024-11-15T08:28:32,165 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40637 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-11-15T08:28:32,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. 2024-11-15T08:28:32,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. 2024-11-15T08:28:32,165 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(2902): Flushing 772ec6a98743eb4d2af5ad780441089c 1/1 column families, dataSize=65 B heapSize=400 B 2024-11-15T08:28:32,165 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 69495864998cb8406b5e6f26788cdc60 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-11-15T08:28:32,181 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c/.tmp/cf/a784d9c2d24244b88171ff1e421ba1c5 is 69, key is 064153d55926a8ca70e15c317bc955748/cf:q/1731659311945/Put/seqid=0 2024-11-15T08:28:32,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60/.tmp/cf/0e1e8b4c609a4f0288f09b87e24ef79d is 71, key is 16f1ff178b6ff07f08d7fef8af6358f7/cf:q/1731659311947/Put/seqid=0 2024-11-15T08:28:32,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742152_1328 (size=5149) 2024-11-15T08:28:32,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742152_1328 (size=5149) 2024-11-15T08:28:32,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742152_1328 (size=5149) 2024-11-15T08:28:32,190 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c/.tmp/cf/a784d9c2d24244b88171ff1e421ba1c5 2024-11-15T08:28:32,196 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c/.tmp/cf/a784d9c2d24244b88171ff1e421ba1c5 as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c/cf/a784d9c2d24244b88171ff1e421ba1c5 2024-11-15T08:28:32,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742153_1329 (size=8462) 2024-11-15T08:28:32,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742153_1329 (size=8462) 2024-11-15T08:28:32,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742153_1329 (size=8462) 2024-11-15T08:28:32,198 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60/.tmp/cf/0e1e8b4c609a4f0288f09b87e24ef79d 2024-11-15T08:28:32,201 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c/cf/a784d9c2d24244b88171ff1e421ba1c5, entries=1, sequenceid=6, filesize=5.0 K 2024-11-15T08:28:32,202 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 772ec6a98743eb4d2af5ad780441089c in 37ms, sequenceid=6, compaction requested=false 2024-11-15T08:28:32,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-11-15T08:28:32,203 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.HRegion(2603): Flush status journal for 772ec6a98743eb4d2af5ad780441089c: 2024-11-15T08:28:32,203 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. for snaptb0-testExportExpiredSnapshot completed. 2024-11-15T08:28:32,203 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-15T08:28:32,203 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:32,203 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c/cf/a784d9c2d24244b88171ff1e421ba1c5] hfiles 2024-11-15T08:28:32,203 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c/cf/a784d9c2d24244b88171ff1e421ba1c5 for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-15T08:28:32,204 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60/.tmp/cf/0e1e8b4c609a4f0288f09b87e24ef79d as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60/cf/0e1e8b4c609a4f0288f09b87e24ef79d 2024-11-15T08:28:32,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742154_1330 (size=110) 2024-11-15T08:28:32,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742154_1330 (size=110) 2024-11-15T08:28:32,209 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60/cf/0e1e8b4c609a4f0288f09b87e24ef79d, entries=49, sequenceid=6, filesize=8.3 K 2024-11-15T08:28:32,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742154_1330 (size=110) 2024-11-15T08:28:32,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. 
2024-11-15T08:28:32,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=140}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=140 2024-11-15T08:28:32,209 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 69495864998cb8406b5e6f26788cdc60 in 44ms, sequenceid=6, compaction requested=false 2024-11-15T08:28:32,210 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 69495864998cb8406b5e6f26788cdc60: 2024-11-15T08:28:32,210 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. for snaptb0-testExportExpiredSnapshot completed. 2024-11-15T08:28:32,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=140 2024-11-15T08:28:32,210 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-15T08:28:32,210 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:32,210 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:32,210 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60/cf/0e1e8b4c609a4f0288f09b87e24ef79d] hfiles 2024-11-15T08:28:32,210 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60/cf/0e1e8b4c609a4f0288f09b87e24ef79d for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-15T08:28:32,210 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=140, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:32,212 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, ppid=139, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 772ec6a98743eb4d2af5ad780441089c in 200 msec 2024-11-15T08:28:32,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742155_1331 (size=110) 2024-11-15T08:28:32,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742155_1331 (size=110) 
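The SnapshotRegionProcedure entries above flush each region's memstore to an HFile (entries=1 and entries=49 at sequenceid=6) before adding file references to the snapshot manifest. The same kind of flush can also be requested explicitly from a client; a minimal sketch, with only the table name taken from the log:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ExplicitFlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask every region of the table to flush its memstore to HFiles,
      // the same per-region operation SnapshotRegionCallable performs above.
      admin.flush(TableName.valueOf("testtb-testExportExpiredSnapshot"));
    }
  }
}
```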
2024-11-15T08:28:32,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742155_1331 (size=110) 2024-11-15T08:28:32,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. 2024-11-15T08:28:32,216 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-15T08:28:32,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-11-15T08:28:32,217 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:32,217 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=139, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:32,219 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=141, resume processing ppid=139 2024-11-15T08:28:32,219 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=139, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 69495864998cb8406b5e6f26788cdc60 in 206 msec 2024-11-15T08:28:32,219 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:28:32,220 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:28:32,220 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:28:32,220 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-11-15T08:28:32,221 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-11-15T08:28:32,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742156_1332 (size=630) 2024-11-15T08:28:32,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742156_1332 (size=630) 2024-11-15T08:28:32,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:32865 is added to blk_1073742156_1332 (size=630) 2024-11-15T08:28:32,236 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:28:32,241 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:28:32,242 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-11-15T08:28:32,243 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=139, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:28:32,243 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 139 2024-11-15T08:28:32,244 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=139, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 255 msec 2024-11-15T08:28:32,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=139 2024-11-15T08:28:32,307 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-15T08:28:32,308 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T08:28:32,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=142, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-11-15T08:28:32,312 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T08:28:32,312 DEBUG [PEWorker-2 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:32,312 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 142 2024-11-15T08:28:32,313 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T08:28:32,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-15T08:28:32,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742157_1333 (size=400) 2024-11-15T08:28:32,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742157_1333 (size=400) 2024-11-15T08:28:32,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742157_1333 (size=400) 2024-11-15T08:28:32,325 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1f91fa0aee2cc478532b519b97469773, NAME => 'testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:32,325 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4746d1a25f8d5d89e0a76786d0716410, NAME => 'testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:32,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742158_1334 (size=61) 2024-11-15T08:28:32,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742159_1335 (size=61) 2024-11-15T08:28:32,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to 
blk_1073742159_1335 (size=61) 2024-11-15T08:28:32,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742158_1334 (size=61) 2024-11-15T08:28:32,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742158_1334 (size=61) 2024-11-15T08:28:32,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742159_1335 (size=61) 2024-11-15T08:28:32,338 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:32,338 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:32,339 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 4746d1a25f8d5d89e0a76786d0716410, disabling compactions & flushes 2024-11-15T08:28:32,339 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. 2024-11-15T08:28:32,339 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. 2024-11-15T08:28:32,339 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 1f91fa0aee2cc478532b519b97469773, disabling compactions & flushes 2024-11-15T08:28:32,339 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. after waiting 0 ms 2024-11-15T08:28:32,339 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. 2024-11-15T08:28:32,339 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. 2024-11-15T08:28:32,339 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. 2024-11-15T08:28:32,339 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. 2024-11-15T08:28:32,339 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. 
after waiting 0 ms 2024-11-15T08:28:32,339 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4746d1a25f8d5d89e0a76786d0716410: Waiting for close lock at 1731659312338Disabling compacts and flushes for region at 1731659312339 (+1 ms)Disabling writes for close at 1731659312339Writing region close event to WAL at 1731659312339Closed at 1731659312339 2024-11-15T08:28:32,339 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. 2024-11-15T08:28:32,339 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. 2024-11-15T08:28:32,339 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 1f91fa0aee2cc478532b519b97469773: Waiting for close lock at 1731659312339Disabling compacts and flushes for region at 1731659312339Disabling writes for close at 1731659312339Writing region close event to WAL at 1731659312339Closed at 1731659312339 2024-11-15T08:28:32,340 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T08:28:32,340 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1731659312340"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659312340"}]},"ts":"1731659312340"} 2024-11-15T08:28:32,340 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1731659312340"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659312340"}]},"ts":"1731659312340"} 2024-11-15T08:28:32,343 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
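The CreateTableProcedure entries above correspond to the shell-style descriptor logged at HMaster$4(2454): a single 'cf' family with VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 65536, and one split at '1', which yields the two regions just added to meta. An equivalent creation through the Java Admin API might look like this minimal sketch; only the names and attributes shown in the log are taken from it, the rest is illustrative:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536 B (64KB)'
              .build())
          .build();
      // One split key => two regions, matching the ',,' and ',1,' regions in the log.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```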
2024-11-15T08:28:32,344 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T08:28:32,344 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659312344"}]},"ts":"1731659312344"} 2024-11-15T08:28:32,345 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-15T08:28:32,346 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {48e8ad8dc936=0} racks are {/default-rack=0} 2024-11-15T08:28:32,347 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T08:28:32,347 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T08:28:32,347 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T08:28:32,347 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T08:28:32,347 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T08:28:32,347 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T08:28:32,347 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T08:28:32,347 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T08:28:32,347 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T08:28:32,347 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T08:28:32,347 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=4746d1a25f8d5d89e0a76786d0716410, ASSIGN}, {pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=1f91fa0aee2cc478532b519b97469773, ASSIGN}] 2024-11-15T08:28:32,348 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=1f91fa0aee2cc478532b519b97469773, ASSIGN 2024-11-15T08:28:32,348 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=4746d1a25f8d5d89e0a76786d0716410, ASSIGN 2024-11-15T08:28:32,349 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=1f91fa0aee2cc478532b519b97469773, ASSIGN; state=OFFLINE, location=48e8ad8dc936,34555,1731659175344; forceNewPlan=false, retain=false 2024-11-15T08:28:32,349 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=4746d1a25f8d5d89e0a76786d0716410, ASSIGN; state=OFFLINE, location=48e8ad8dc936,37511,1731659175104; forceNewPlan=false, retain=false 2024-11-15T08:28:32,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-15T08:28:32,499 INFO [48e8ad8dc936:35169 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-15T08:28:32,500 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=143 updating hbase:meta row=4746d1a25f8d5d89e0a76786d0716410, regionState=OPENING, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:28:32,500 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=1f91fa0aee2cc478532b519b97469773, regionState=OPENING, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:28:32,502 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=143, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=4746d1a25f8d5d89e0a76786d0716410, ASSIGN because future has completed 2024-11-15T08:28:32,502 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=145, ppid=143, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4746d1a25f8d5d89e0a76786d0716410, server=48e8ad8dc936,37511,1731659175104}] 2024-11-15T08:28:32,502 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=142, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=1f91fa0aee2cc478532b519b97469773, ASSIGN because future has completed 2024-11-15T08:28:32,503 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1f91fa0aee2cc478532b519b97469773, server=48e8ad8dc936,34555,1731659175344}] 2024-11-15T08:28:32,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-15T08:28:32,657 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. 2024-11-15T08:28:32,657 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7752): Opening region: {ENCODED => 4746d1a25f8d5d89e0a76786d0716410, NAME => 'testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T08:28:32,657 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. service=AccessControlService 2024-11-15T08:28:32,657 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T08:28:32,657 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 4746d1a25f8d5d89e0a76786d0716410 2024-11-15T08:28:32,658 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:32,658 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. 2024-11-15T08:28:32,658 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7794): checking encryption for 4746d1a25f8d5d89e0a76786d0716410 2024-11-15T08:28:32,658 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(7797): checking classloading for 4746d1a25f8d5d89e0a76786d0716410 2024-11-15T08:28:32,658 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => 1f91fa0aee2cc478532b519b97469773, NAME => 'testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T08:28:32,658 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. service=AccessControlService 2024-11-15T08:28:32,658 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
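The CreateTableProcedure and region-open events above correspond, on the client side, to a create-table call of roughly the following shape. This is an illustrative sketch, not the test's actual code: the table name, the single 'cf' column family and the split key '1' are read off the log (the two regions span ''..'1' and '1'..''), while the class name and configuration handling are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical helper class; creates the two-region table seen in the log.
    public final class CreateTestTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testExportExpiredSnapshot");
          // One family 'cf' and one split key '1' give regions (''..'1') and ('1'..'').
          admin.createTable(
              TableDescriptorBuilder.newBuilder(tn)
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                  .build(),
              new byte[][] { Bytes.toBytes("1") });
        }
      }
    }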
2024-11-15T08:28:32,658 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 1f91fa0aee2cc478532b519b97469773 2024-11-15T08:28:32,658 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:32,659 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for 1f91fa0aee2cc478532b519b97469773 2024-11-15T08:28:32,659 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for 1f91fa0aee2cc478532b519b97469773 2024-11-15T08:28:32,659 INFO [StoreOpener-4746d1a25f8d5d89e0a76786d0716410-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4746d1a25f8d5d89e0a76786d0716410 2024-11-15T08:28:32,660 INFO [StoreOpener-1f91fa0aee2cc478532b519b97469773-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1f91fa0aee2cc478532b519b97469773 2024-11-15T08:28:32,660 INFO [StoreOpener-4746d1a25f8d5d89e0a76786d0716410-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4746d1a25f8d5d89e0a76786d0716410 columnFamilyName cf 2024-11-15T08:28:32,660 DEBUG [StoreOpener-4746d1a25f8d5d89e0a76786d0716410-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:32,661 INFO [StoreOpener-4746d1a25f8d5d89e0a76786d0716410-1 {}] regionserver.HStore(327): Store=4746d1a25f8d5d89e0a76786d0716410/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:28:32,661 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1038): replaying wal for 4746d1a25f8d5d89e0a76786d0716410 2024-11-15T08:28:32,661 INFO [StoreOpener-1f91fa0aee2cc478532b519b97469773-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1f91fa0aee2cc478532b519b97469773 columnFamilyName cf 2024-11-15T08:28:32,661 DEBUG [StoreOpener-1f91fa0aee2cc478532b519b97469773-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:32,662 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/4746d1a25f8d5d89e0a76786d0716410 2024-11-15T08:28:32,662 INFO [StoreOpener-1f91fa0aee2cc478532b519b97469773-1 {}] regionserver.HStore(327): Store=1f91fa0aee2cc478532b519b97469773/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:28:32,662 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for 1f91fa0aee2cc478532b519b97469773 2024-11-15T08:28:32,662 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/4746d1a25f8d5d89e0a76786d0716410 2024-11-15T08:28:32,662 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1048): stopping wal replay for 4746d1a25f8d5d89e0a76786d0716410 2024-11-15T08:28:32,662 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/1f91fa0aee2cc478532b519b97469773 2024-11-15T08:28:32,662 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1060): Cleaning up temporary data for 4746d1a25f8d5d89e0a76786d0716410 2024-11-15T08:28:32,663 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/1f91fa0aee2cc478532b519b97469773 2024-11-15T08:28:32,663 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for 1f91fa0aee2cc478532b519b97469773 2024-11-15T08:28:32,663 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for 1f91fa0aee2cc478532b519b97469773 2024-11-15T08:28:32,664 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] 
regionserver.HRegion(1093): writing seq id for 4746d1a25f8d5d89e0a76786d0716410 2024-11-15T08:28:32,664 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for 1f91fa0aee2cc478532b519b97469773 2024-11-15T08:28:32,666 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/4746d1a25f8d5d89e0a76786d0716410/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:28:32,666 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1114): Opened 4746d1a25f8d5d89e0a76786d0716410; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65542355, jitterRate=-0.023342803120613098}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:28:32,666 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/1f91fa0aee2cc478532b519b97469773/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:28:32,666 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4746d1a25f8d5d89e0a76786d0716410 2024-11-15T08:28:32,666 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened 1f91fa0aee2cc478532b519b97469773; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67218125, jitterRate=0.0016281157732009888}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:28:32,667 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegion(1006): Region open journal for 4746d1a25f8d5d89e0a76786d0716410: Running coprocessor pre-open hook at 1731659312658Writing region info on filesystem at 1731659312658Initializing all the Stores at 1731659312658Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659312658Cleaning up temporary data from old regions at 1731659312662 (+4 ms)Running coprocessor post-open hooks at 1731659312666 (+4 ms)Region opened successfully at 1731659312667 (+1 ms) 2024-11-15T08:28:32,667 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1f91fa0aee2cc478532b519b97469773 2024-11-15T08:28:32,667 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for 1f91fa0aee2cc478532b519b97469773: Running coprocessor pre-open hook at 1731659312659Writing region info on filesystem at 1731659312659Initializing all the Stores at 1731659312659Instantiating store for 
column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659312659Cleaning up temporary data from old regions at 1731659312663 (+4 ms)Running coprocessor post-open hooks at 1731659312667 (+4 ms)Region opened successfully at 1731659312667 2024-11-15T08:28:32,668 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773., pid=146, masterSystemTime=1731659312655 2024-11-15T08:28:32,668 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410., pid=145, masterSystemTime=1731659312653 2024-11-15T08:28:32,669 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. 2024-11-15T08:28:32,669 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. 2024-11-15T08:28:32,670 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=1f91fa0aee2cc478532b519b97469773, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:28:32,670 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. 2024-11-15T08:28:32,670 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=145}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. 
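Once both regions report 'Opened', the resulting layout is visible to clients through the RegionLocator API. A minimal sketch, assuming an already-open Connection like the one in the previous sketch; the helper name is invented:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    // Hypothetical helper; prints each region's encoded name and hosting server.
    final class RegionLayoutSketch {
      static void printRegions(Connection conn) throws IOException {
        try (RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testExportExpiredSnapshot"))) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // e.g. 4746d1a25f8d5d89e0a76786d0716410 -> 48e8ad8dc936,37511,1731659175104
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }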
2024-11-15T08:28:32,671 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=143 updating hbase:meta row=4746d1a25f8d5d89e0a76786d0716410, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:28:32,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1f91fa0aee2cc478532b519b97469773, server=48e8ad8dc936,34555,1731659175344 because future has completed 2024-11-15T08:28:32,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4746d1a25f8d5d89e0a76786d0716410, server=48e8ad8dc936,37511,1731659175104 because future has completed 2024-11-15T08:28:32,674 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=144 2024-11-15T08:28:32,674 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure 1f91fa0aee2cc478532b519b97469773, server=48e8ad8dc936,34555,1731659175344 in 169 msec 2024-11-15T08:28:32,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=145, resume processing ppid=143 2024-11-15T08:28:32,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; OpenRegionProcedure 4746d1a25f8d5d89e0a76786d0716410, server=48e8ad8dc936,37511,1731659175104 in 171 msec 2024-11-15T08:28:32,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=142, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=1f91fa0aee2cc478532b519b97469773, ASSIGN in 327 msec 2024-11-15T08:28:32,677 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=143, resume processing ppid=142 2024-11-15T08:28:32,677 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, ppid=142, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=4746d1a25f8d5d89e0a76786d0716410, ASSIGN in 328 msec 2024-11-15T08:28:32,678 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T08:28:32,678 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659312678"}]},"ts":"1731659312678"} 2024-11-15T08:28:32,679 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-15T08:28:32,680 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=142, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T08:28:32,680 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-11-15T08:28:32,682 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-15T08:28:32,685 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:32,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:32,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:32,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:32,687 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:32,688 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:32,688 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:32,688 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:32,688 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:32,688 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:32,688 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:32,688 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:32,689 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 379 msec 2024-11-15T08:28:32,936 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=142 2024-11-15T08:28:32,936 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-11-15T08:28:32,936 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-11-15T08:28:32,937 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:28:32,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-11-15T08:28:32,940 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:28:32,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportExpiredSnapshot assigned. 2024-11-15T08:28:32,940 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-15T08:28:32,945 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='09911874b34efa49b572a9e0673eefdf3', locateType=CURRENT is [region=testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:28:32,946 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='1a88a58a155f7b3f2123d0550f2bbf1fa', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773., hostname=48e8ad8dc936,34555,1731659175344, seqNum=2] 2024-11-15T08:28:32,947 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='276077edac8c3cece3b7895a7c9b1bc30', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773., hostname=48e8ad8dc936,34555,1731659175344, seqNum=2] 2024-11-15T08:28:32,948 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='358e5ef0801fdeb35233dc081dc16a22a', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773., hostname=48e8ad8dc936,34555,1731659175344, seqNum=2] 2024-11-15T08:28:32,951 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37511 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:28:32,953 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34555 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. with WAL disabled. Data may be lost in the event of a crash. 
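The "writing data to region ... with WAL disabled" messages above are the server-side warnings for mutations sent with reduced durability. A hedged sketch of what such a write looks like from the client: the row key, family 'cf' and qualifier 'q' are taken from entries later in the log, the value and helper name are placeholders.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical helper; issues one Put that skips the WAL.
    final class SkipWalWriteSketch {
      static void writeWithoutWal(Connection conn) throws IOException {
        try (Table table = conn.getTable(TableName.valueOf("testExportExpiredSnapshot"))) {
          Put put = new Put(Bytes.toBytes("05b76f0e6a1e8d3c31197598c72144c0"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL); // produces the "WAL disabled" warning
          table.put(put);
        }
      }
    }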
2024-11-15T08:28:32,954 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-15T08:28:32,956 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-15T08:28:32,956 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. 2024-11-15T08:28:32,957 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:28:32,958 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-15T08:28:32,962 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-15T08:28:32,967 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-15T08:28:32,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-15T08:28:32,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:28:32,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f2833a2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:32,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:32,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:32,969 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:32,969 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:32,969 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:32,969 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@96dbd7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:32,969 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going 
to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:32,969 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:32,970 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:32,970 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37128, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:32,971 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@602249e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:32,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:32,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:32,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:32,973 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55706, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:28:32,973 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 
2024-11-15T08:28:32,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:32,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:32,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:32,974 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:28:32,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3727c3ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:32,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:32,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:32,975 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:32,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:32,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:32,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7659e4fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:32,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:32,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:32,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:32,976 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37140, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:32,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a671906, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:32,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:32,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:32,978 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:32,978 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55720, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:28:32,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:32,980 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 
2024-11-15T08:28:32,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor239.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:32,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:32,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:32,981 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:28:32,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-15T08:28:32,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
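The snapshot request being validated above carries type=FLUSH and ttl=10. A request of that shape can be issued through Admin.snapshot; this sketch assumes the TTL is supplied via the snapshot properties map under the "TTL" key, as in recent HBase releases, and the helper name is invented:

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    // Hypothetical helper; mirrors the "type=FLUSH ttl=10" request in the log.
    final class TtlSnapshotSketch {
      static void takeSnapshot(Admin admin) throws IOException {
        Map<String, Object> props = new HashMap<>();
        props.put("TTL", 10L); // seconds; the snapshot is treated as expired afterwards
        admin.snapshot(new SnapshotDescription(
            "snapshot-testExportExpiredSnapshot",
            TableName.valueOf("testExportExpiredSnapshot"),
            SnapshotType.FLUSH,
            props));
      }
    }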
2024-11-15T08:28:32,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=147, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-15T08:28:32,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 147 2024-11-15T08:28:32,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-15T08:28:32,983 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:28:32,984 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:28:32,986 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:28:32,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742160_1336 (size=152) 2024-11-15T08:28:32,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742160_1336 (size=152) 2024-11-15T08:28:32,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742160_1336 (size=152) 2024-11-15T08:28:32,993 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:28:32,993 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4746d1a25f8d5d89e0a76786d0716410}, {pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1f91fa0aee2cc478532b519b97469773}] 2024-11-15T08:28:32,994 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4746d1a25f8d5d89e0a76786d0716410 2024-11-15T08:28:32,994 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1f91fa0aee2cc478532b519b97469773 2024-11-15T08:28:33,085 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-15T08:28:33,146 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34555 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=149 2024-11-15T08:28:33,146 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=148 2024-11-15T08:28:33,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. 2024-11-15T08:28:33,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. 2024-11-15T08:28:33,146 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(2902): Flushing 4746d1a25f8d5d89e0a76786d0716410 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-15T08:28:33,146 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(2902): Flushing 1f91fa0aee2cc478532b519b97469773 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-15T08:28:33,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/4746d1a25f8d5d89e0a76786d0716410/.tmp/cf/e9dc336bc05b45a7bb1782514d622e3b is 71, key is 05b76f0e6a1e8d3c31197598c72144c0/cf:q/1731659312951/Put/seqid=0 2024-11-15T08:28:33,162 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/1f91fa0aee2cc478532b519b97469773/.tmp/cf/818a13cc67a34099bec9b60b0137c9cf is 71, key is 1a7f04ee2a75b3a1abf8e60cff6ab86b/cf:q/1731659312953/Put/seqid=0 2024-11-15T08:28:33,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742161_1337 (size=8324) 2024-11-15T08:28:33,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742161_1337 (size=8324) 2024-11-15T08:28:33,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742162_1338 (size=5286) 2024-11-15T08:28:33,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742162_1338 (size=5286) 2024-11-15T08:28:33,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742161_1337 (size=8324) 2024-11-15T08:28:33,171 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/1f91fa0aee2cc478532b519b97469773/.tmp/cf/818a13cc67a34099bec9b60b0137c9cf 2024-11-15T08:28:33,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742162_1338 (size=5286) 2024-11-15T08:28:33,171 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/4746d1a25f8d5d89e0a76786d0716410/.tmp/cf/e9dc336bc05b45a7bb1782514d622e3b 2024-11-15T08:28:33,177 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/1f91fa0aee2cc478532b519b97469773/.tmp/cf/818a13cc67a34099bec9b60b0137c9cf as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/1f91fa0aee2cc478532b519b97469773/cf/818a13cc67a34099bec9b60b0137c9cf 2024-11-15T08:28:33,177 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/4746d1a25f8d5d89e0a76786d0716410/.tmp/cf/e9dc336bc05b45a7bb1782514d622e3b as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/4746d1a25f8d5d89e0a76786d0716410/cf/e9dc336bc05b45a7bb1782514d622e3b 2024-11-15T08:28:33,181 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/1f91fa0aee2cc478532b519b97469773/cf/818a13cc67a34099bec9b60b0137c9cf, entries=47, sequenceid=5, filesize=8.1 K 2024-11-15T08:28:33,181 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/4746d1a25f8d5d89e0a76786d0716410/cf/e9dc336bc05b45a7bb1782514d622e3b, entries=3, sequenceid=5, filesize=5.2 K 2024-11-15T08:28:33,182 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 4746d1a25f8d5d89e0a76786d0716410 in 36ms, sequenceid=5, compaction requested=false 2024-11-15T08:28:33,182 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 1f91fa0aee2cc478532b519b97469773 in 36ms, sequenceid=5, compaction requested=false 
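Because the snapshot is of type FLUSH, the two memstore flushes above are driven by the SnapshotRegionProcedure itself rather than by the client. The same effect can be requested on demand; a trivial sketch, again assuming an open Admin and an invented helper name:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    // Hypothetical helper; flushes all regions of the table, as the FLUSH snapshot does implicitly.
    final class ManualFlushSketch {
      static void flushTable(Admin admin) throws IOException {
        admin.flush(TableName.valueOf("testExportExpiredSnapshot"));
      }
    }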
2024-11-15T08:28:33,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-11-15T08:28:33,182 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-11-15T08:28:33,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.HRegion(2603): Flush status journal for 1f91fa0aee2cc478532b519b97469773: 2024-11-15T08:28:33,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.HRegion(2603): Flush status journal for 4746d1a25f8d5d89e0a76786d0716410: 2024-11-15T08:28:33,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. for snapshot-testExportExpiredSnapshot completed. 2024-11-15T08:28:33,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. for snapshot-testExportExpiredSnapshot completed. 2024-11-15T08:28:33,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-15T08:28:33,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:33,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/1f91fa0aee2cc478532b519b97469773/cf/818a13cc67a34099bec9b60b0137c9cf] hfiles 2024-11-15T08:28:33,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/1f91fa0aee2cc478532b519b97469773/cf/818a13cc67a34099bec9b60b0137c9cf for snapshot=snapshot-testExportExpiredSnapshot 2024-11-15T08:28:33,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-15T08:28:33,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:33,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/4746d1a25f8d5d89e0a76786d0716410/cf/e9dc336bc05b45a7bb1782514d622e3b] hfiles 2024-11-15T08:28:33,183 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/4746d1a25f8d5d89e0a76786d0716410/cf/e9dc336bc05b45a7bb1782514d622e3b for snapshot=snapshot-testExportExpiredSnapshot 2024-11-15T08:28:33,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742164_1340 (size=103) 2024-11-15T08:28:33,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742164_1340 (size=103) 2024-11-15T08:28:33,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742164_1340 (size=103) 2024-11-15T08:28:33,191 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. 
2024-11-15T08:28:33,191 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=148}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=148 2024-11-15T08:28:33,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=148 2024-11-15T08:28:33,191 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 4746d1a25f8d5d89e0a76786d0716410 2024-11-15T08:28:33,192 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4746d1a25f8d5d89e0a76786d0716410 2024-11-15T08:28:33,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, ppid=147, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4746d1a25f8d5d89e0a76786d0716410 in 199 msec 2024-11-15T08:28:33,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742163_1339 (size=103) 2024-11-15T08:28:33,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742163_1339 (size=103) 2024-11-15T08:28:33,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742163_1339 (size=103) 2024-11-15T08:28:33,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. 
2024-11-15T08:28:33,202 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-11-15T08:28:33,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=149 2024-11-15T08:28:33,203 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 1f91fa0aee2cc478532b519b97469773 2024-11-15T08:28:33,203 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=147, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1f91fa0aee2cc478532b519b97469773 2024-11-15T08:28:33,205 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=149, resume processing ppid=147 2024-11-15T08:28:33,205 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=147, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1f91fa0aee2cc478532b519b97469773 in 211 msec 2024-11-15T08:28:33,205 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:28:33,206 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:28:33,207 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:28:33,207 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-11-15T08:28:33,207 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-15T08:28:33,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742165_1341 (size=609) 2024-11-15T08:28:33,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742165_1341 (size=609) 2024-11-15T08:28:33,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742165_1341 (size=609) 2024-11-15T08:28:33,221 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:28:33,226 INFO [PEWorker-3 {}] 
procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:28:33,226 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-11-15T08:28:33,227 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=147, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:28:33,227 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 147 2024-11-15T08:28:33,229 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=147, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 245 msec 2024-11-15T08:28:33,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=147 2024-11-15T08:28:33,296 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-11-15T08:28:34,710 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-15T08:28:34,710 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-15T08:28:34,711 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-15T08:28:34,711 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-15T08:28:34,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-15T08:28:34,712 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-15T08:28:34,881 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0006_000001 (auth:SIMPLE) from 127.0.0.1:35606 
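Procedure 147 above completes the snapshot described as { ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }. The sketch below shows how a client could request such a FLUSH snapshot with a 10-second TTL through the Admin API. It is a hedged sketch: the SnapshotDescription constructor taking a snapshotProps map and the "TTL" property key (in seconds) are assumptions about the client API and should be verified against the SnapshotDescription javadoc for your release; the TakeTtlSnapshot wrapper class is illustrative.

import java.util.Collections;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeTtlSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Assumed property key "TTL" (seconds); a 10s TTL makes the snapshot expire almost
      // immediately, which is what the expired-export failure further below relies on.
      SnapshotDescription desc = new SnapshotDescription(
          "snapshot-testExportExpiredSnapshot",
          TableName.valueOf("testExportExpiredSnapshot"),
          SnapshotType.FLUSH,
          Collections.singletonMap("TTL", (Object) 10L));
      admin.snapshot(desc);
    }
  }
}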
2024-11-15T08:28:34,893 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_0/usercache/jenkins/appcache/application_1731659181416_0006/container_1731659181416_0006_01_000001/launch_container.sh] 2024-11-15T08:28:34,893 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_0/usercache/jenkins/appcache/application_1731659181416_0006/container_1731659181416_0006_01_000001/container_tokens] 2024-11-15T08:28:34,893 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_0/usercache/jenkins/appcache/application_1731659181416_0006/container_1731659181416_0006_01_000001/sysfs] 2024-11-15T08:28:35,717 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T08:28:43,304 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659323304 2024-11-15T08:28:43,304 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:44023, tgtDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659323304, rawTgtDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659323304, srcFsUri=hdfs://localhost:44023, srcDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:43,335 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44023, inputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:43,335 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659323304, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659323304/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-15T08:28:43,338 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-15T08:28:43,339 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:951) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1096) ~[classes/:?] 
at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:522) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:314) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
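The stack trace above comes from ExportSnapshot verifying the source snapshot before copying anything: once the snapshot's creation time plus its TTL (10 s here) lies in the past, verifySnapshot throws SnapshotTTLExpiredException and the tool aborts. The following is a hedged sketch of driving the same tool from code via ToolRunner, using the destination path from the log; the -snapshot/-copy-to option names follow the commonly documented ExportSnapshot usage but should be treated as assumptions and checked against the tool's help output, and the RunExportSnapshot wrapper class is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExportSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snapshot-testExportExpiredSnapshot",
        "-copy-to", "hdfs://localhost:44023/user/jenkins/test-data/"
            + "6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659323304"
    });
    // Per the log, AbstractHBaseTool catches the SnapshotTTLExpiredException, logs the ERROR
    // seen above and returns a non-zero code, which is what testExportExpiredSnapshot asserts.
    System.out.println("ExportSnapshot exit code: " + rc);
  }
}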
2024-11-15T08:28:43,341 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=150, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-15T08:28:43,344 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659323344"}]},"ts":"1731659323344"} 2024-11-15T08:28:43,346 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-11-15T08:28:43,346 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-11-15T08:28:43,347 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-11-15T08:28:43,348 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=772ec6a98743eb4d2af5ad780441089c, UNASSIGN}, {pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=69495864998cb8406b5e6f26788cdc60, UNASSIGN}] 2024-11-15T08:28:43,349 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=772ec6a98743eb4d2af5ad780441089c, UNASSIGN 2024-11-15T08:28:43,349 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=69495864998cb8406b5e6f26788cdc60, UNASSIGN 2024-11-15T08:28:43,350 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=69495864998cb8406b5e6f26788cdc60, regionState=CLOSING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:28:43,350 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=152 updating hbase:meta row=772ec6a98743eb4d2af5ad780441089c, regionState=CLOSING, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:28:43,351 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=69495864998cb8406b5e6f26788cdc60, UNASSIGN because future has completed 2024-11-15T08:28:43,351 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:28:43,351 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; CloseRegionProcedure 69495864998cb8406b5e6f26788cdc60, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:28:43,352 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=772ec6a98743eb4d2af5ad780441089c, UNASSIGN because future has completed 2024-11-15T08:28:43,352 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:28:43,352 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=155, ppid=152, state=RUNNABLE, hasLock=false; CloseRegionProcedure 772ec6a98743eb4d2af5ad780441089c, server=48e8ad8dc936,34555,1731659175344}] 2024-11-15T08:28:43,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-15T08:28:43,477 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T08:28:43,504 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(122): Close 69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:43,504 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:28:43,504 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1722): Closing 69495864998cb8406b5e6f26788cdc60, disabling compactions & flushes 2024-11-15T08:28:43,504 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. 2024-11-15T08:28:43,504 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. 2024-11-15T08:28:43,504 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. after waiting 0 ms 2024-11-15T08:28:43,504 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. 
2024-11-15T08:28:43,505 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(122): Close 772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:43,505 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:28:43,505 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1722): Closing 772ec6a98743eb4d2af5ad780441089c, disabling compactions & flushes 2024-11-15T08:28:43,505 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. 2024-11-15T08:28:43,505 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. 2024-11-15T08:28:43,505 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. after waiting 0 ms 2024-11-15T08:28:43,505 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. 2024-11-15T08:28:43,509 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T08:28:43,509 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T08:28:43,510 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:28:43,510 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:28:43,510 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60. 2024-11-15T08:28:43,510 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c. 
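Procedures 150-155 above are the server side of an ordinary disable-table request: the master schedules a CloseTableRegionsProcedure, which unassigns and closes both regions. A minimal sketch of the corresponding client call with the standard Admin API follows (connection setup assumed; the DisableTestTable wrapper class is illustrative).

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTestTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportExpiredSnapshot");
      // disableTable blocks until the master procedure (and the per-region UNASSIGN
      // subprocedures seen in the log) have completed.
      admin.disableTable(tn);
      System.out.println("disabled: " + admin.isTableDisabled(tn));
    }
  }
}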
2024-11-15T08:28:43,510 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] regionserver.HRegion(1676): Region close journal for 69495864998cb8406b5e6f26788cdc60: Waiting for close lock at 1731659323504Running coprocessor pre-close hooks at 1731659323504Disabling compacts and flushes for region at 1731659323504Disabling writes for close at 1731659323504Writing region close event to WAL at 1731659323505 (+1 ms)Running coprocessor post-close hooks at 1731659323510 (+5 ms)Closed at 1731659323510 2024-11-15T08:28:43,510 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1676): Region close journal for 772ec6a98743eb4d2af5ad780441089c: Waiting for close lock at 1731659323505Running coprocessor pre-close hooks at 1731659323505Disabling compacts and flushes for region at 1731659323505Disabling writes for close at 1731659323505Writing region close event to WAL at 1731659323506 (+1 ms)Running coprocessor post-close hooks at 1731659323510 (+4 ms)Closed at 1731659323510 2024-11-15T08:28:43,512 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=154}] handler.UnassignRegionHandler(157): Closed 69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:43,512 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=69495864998cb8406b5e6f26788cdc60, regionState=CLOSED 2024-11-15T08:28:43,513 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(157): Closed 772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:43,514 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=152 updating hbase:meta row=772ec6a98743eb4d2af5ad780441089c, regionState=CLOSED 2024-11-15T08:28:43,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; CloseRegionProcedure 69495864998cb8406b5e6f26788cdc60, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:28:43,516 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=155, ppid=152, state=RUNNABLE, hasLock=false; CloseRegionProcedure 772ec6a98743eb4d2af5ad780441089c, server=48e8ad8dc936,34555,1731659175344 because future has completed 2024-11-15T08:28:43,518 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-11-15T08:28:43,518 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; CloseRegionProcedure 69495864998cb8406b5e6f26788cdc60, server=48e8ad8dc936,40637,1731659175269 in 165 msec 2024-11-15T08:28:43,519 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=155, resume processing ppid=152 2024-11-15T08:28:43,519 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=69495864998cb8406b5e6f26788cdc60, UNASSIGN in 170 msec 2024-11-15T08:28:43,519 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, ppid=152, state=SUCCESS, hasLock=false; CloseRegionProcedure 772ec6a98743eb4d2af5ad780441089c, server=48e8ad8dc936,34555,1731659175344 in 165 msec 2024-11-15T08:28:43,523 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=152, resume processing ppid=151 2024-11-15T08:28:43,523 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=151, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=772ec6a98743eb4d2af5ad780441089c, UNASSIGN in 171 msec 2024-11-15T08:28:43,526 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=150 2024-11-15T08:28:43,526 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=150, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 178 msec 2024-11-15T08:28:43,528 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659323527"}]},"ts":"1731659323527"} 2024-11-15T08:28:43,529 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-11-15T08:28:43,529 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-11-15T08:28:43,531 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 189 msec 2024-11-15T08:28:43,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=150 2024-11-15T08:28:43,656 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-15T08:28:43,656 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,658 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=156, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,659 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=156, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,661 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,662 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:43,663 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:43,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,664 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c/recovered.edits] 2024-11-15T08:28:43,665 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-15T08:28:43,665 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-15T08:28:43,665 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-15T08:28:43,665 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-15T08:28:43,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:43,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:43,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:43,666 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:43,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=156 2024-11-15T08:28:43,668 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:43,668 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:43,668 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:43,668 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:43,668 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60/recovered.edits] 2024-11-15T08:28:43,670 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c/cf/a784d9c2d24244b88171ff1e421ba1c5 to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c/cf/a784d9c2d24244b88171ff1e421ba1c5 2024-11-15T08:28:43,672 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60/cf/0e1e8b4c609a4f0288f09b87e24ef79d to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60/cf/0e1e8b4c609a4f0288f09b87e24ef79d 2024-11-15T08:28:43,672 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c/recovered.edits/9.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c/recovered.edits/9.seqid 2024-11-15T08:28:43,673 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/772ec6a98743eb4d2af5ad780441089c 2024-11-15T08:28:43,674 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60/recovered.edits/9.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60/recovered.edits/9.seqid 2024-11-15T08:28:43,674 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportExpiredSnapshot/69495864998cb8406b5e6f26788cdc60 2024-11-15T08:28:43,675 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-11-15T08:28:43,676 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=156, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,678 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-11-15T08:28:43,680 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-11-15T08:28:43,681 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=156, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,681 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 
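The DeleteTableProcedure entries show that deleting a table archives its hfiles (HFileArchiver moves them under archive/data/...) rather than removing them outright, and the delete requests logged just below drop the three snapshots by name. A minimal sketch of those cleanup calls with the standard Admin API; table and snapshot names are taken from the log, and the DropTableAndSnapshots wrapper class is illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableAndSnapshots {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // A table must be disabled before it can be deleted (see the DISABLE step earlier).
      admin.deleteTable(TableName.valueOf("testtb-testExportExpiredSnapshot"));
      // Snapshot names as they appear in the delete requests in the log.
      admin.deleteSnapshot("emptySnaptb0-testExportExpiredSnapshot");
      admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
      admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");
    }
  }
}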
2024-11-15T08:28:43,681 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659323681"}]},"ts":"9223372036854775807"} 2024-11-15T08:28:43,681 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659323681"}]},"ts":"9223372036854775807"} 2024-11-15T08:28:43,683 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T08:28:43,683 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 772ec6a98743eb4d2af5ad780441089c, NAME => 'testtb-testExportExpiredSnapshot,,1731659310964.772ec6a98743eb4d2af5ad780441089c.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 69495864998cb8406b5e6f26788cdc60, NAME => 'testtb-testExportExpiredSnapshot,1,1731659310964.69495864998cb8406b5e6f26788cdc60.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T08:28:43,683 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 2024-11-15T08:28:43,683 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731659323683"}]},"ts":"9223372036854775807"} 2024-11-15T08:28:43,685 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-11-15T08:28:43,686 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=156, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,687 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 30 msec 2024-11-15T08:28:43,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=156 2024-11-15T08:28:43,776 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-11-15T08:28:43,776 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-15T08:28:43,784 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-15T08:28:43,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-11-15T08:28:43,786 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-11-15T08:28:43,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-11-15T08:28:43,789 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-15T08:28:43,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-11-15T08:28:43,809 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=792 (was 798), OpenFileDescriptor=793 (was 803), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=365 (was 417), ProcessCount=13 (was 19), AvailableMemoryMB=1443 (was 724) - AvailableMemoryMB LEAK? - 2024-11-15T08:28:43,809 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=792 is superior to 500 2024-11-15T08:28:43,826 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=792, OpenFileDescriptor=793, MaxFileDescriptor=1048576, SystemLoadAverage=365, ProcessCount=13, AvailableMemoryMB=1443 2024-11-15T08:28:43,826 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=792 is superior to 500 2024-11-15T08:28:43,827 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T08:28:43,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-15T08:28:43,829 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T08:28:43,829 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:43,829 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 157 2024-11-15T08:28:43,830 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T08:28:43,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-15T08:28:43,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742166_1342 (size=412) 2024-11-15T08:28:43,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742166_1342 (size=412) 2024-11-15T08:28:43,839 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742166_1342 (size=412) 2024-11-15T08:28:43,841 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 77f5f1977f6b57fce43d928c1679745a, NAME => 'testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:43,841 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => c2712fa8b72f4baa09dd8489a16bd07e, NAME => 'testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:43,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742167_1343 (size=73) 2024-11-15T08:28:43,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742167_1343 (size=73) 2024-11-15T08:28:43,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742167_1343 (size=73) 2024-11-15T08:28:43,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742168_1344 (size=73) 2024-11-15T08:28:43,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742168_1344 (size=73) 2024-11-15T08:28:43,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742168_1344 (size=73) 2024-11-15T08:28:43,851 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:43,851 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 77f5f1977f6b57fce43d928c1679745a, disabling compactions & flushes 2024-11-15T08:28:43,851 INFO 
[RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. 2024-11-15T08:28:43,851 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. 2024-11-15T08:28:43,851 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. after waiting 0 ms 2024-11-15T08:28:43,851 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. 2024-11-15T08:28:43,851 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. 2024-11-15T08:28:43,851 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:43,851 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 77f5f1977f6b57fce43d928c1679745a: Waiting for close lock at 1731659323851Disabling compacts and flushes for region at 1731659323851Disabling writes for close at 1731659323851Writing region close event to WAL at 1731659323851Closed at 1731659323851 2024-11-15T08:28:43,851 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing c2712fa8b72f4baa09dd8489a16bd07e, disabling compactions & flushes 2024-11-15T08:28:43,851 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. 2024-11-15T08:28:43,851 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. 2024-11-15T08:28:43,851 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. after waiting 0 ms 2024-11-15T08:28:43,851 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. 2024-11-15T08:28:43,851 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. 
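Procedure 157 above creates testtb-testEmptyExportFileSystemState with a single 'cf' family (VERSIONS => '1') and two regions split at '1'; during CREATE_TABLE_WRITE_FS_LAYOUT each region is instantiated once only to write its filesystem layout and is immediately closed again, which is what the Closing/Closed pairs above record. Below is a hedged sketch of the equivalent client-side request; the split key is inferred from the region boundaries in the log, and the CreateEmptyExportTable wrapper class is illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateEmptyExportTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
          .setRegionReplication(1)
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build());
      byte[][] splitKeys = { Bytes.toBytes("1") };  // yields regions ['', '1') and ['1', '')
      admin.createTable(table.build(), splitKeys);
    }
  }
}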
2024-11-15T08:28:43,852 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for c2712fa8b72f4baa09dd8489a16bd07e: Waiting for close lock at 1731659323851Disabling compacts and flushes for region at 1731659323851Disabling writes for close at 1731659323851Writing region close event to WAL at 1731659323851Closed at 1731659323851 2024-11-15T08:28:43,853 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T08:28:43,853 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1731659323853"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659323853"}]},"ts":"1731659323853"} 2024-11-15T08:28:43,853 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1731659323853"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659323853"}]},"ts":"1731659323853"} 2024-11-15T08:28:43,855 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T08:28:43,856 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T08:28:43,856 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659323856"}]},"ts":"1731659323856"} 2024-11-15T08:28:43,858 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-11-15T08:28:43,858 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {48e8ad8dc936=0} racks are {/default-rack=0} 2024-11-15T08:28:43,859 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T08:28:43,859 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T08:28:43,859 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T08:28:43,859 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T08:28:43,859 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T08:28:43,859 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T08:28:43,859 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T08:28:43,859 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T08:28:43,859 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T08:28:43,859 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T08:28:43,859 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=77f5f1977f6b57fce43d928c1679745a, ASSIGN}, {pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c2712fa8b72f4baa09dd8489a16bd07e, ASSIGN}] 2024-11-15T08:28:43,861 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c2712fa8b72f4baa09dd8489a16bd07e, ASSIGN 2024-11-15T08:28:43,861 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=77f5f1977f6b57fce43d928c1679745a, ASSIGN 2024-11-15T08:28:43,861 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c2712fa8b72f4baa09dd8489a16bd07e, ASSIGN; state=OFFLINE, location=48e8ad8dc936,37511,1731659175104; forceNewPlan=false, retain=false 2024-11-15T08:28:43,861 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=77f5f1977f6b57fce43d928c1679745a, ASSIGN; state=OFFLINE, location=48e8ad8dc936,40637,1731659175269; forceNewPlan=false, retain=false 2024-11-15T08:28:43,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-15T08:28:44,012 INFO [48e8ad8dc936:35169 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-15T08:28:44,012 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=c2712fa8b72f4baa09dd8489a16bd07e, regionState=OPENING, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:28:44,012 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=158 updating hbase:meta row=77f5f1977f6b57fce43d928c1679745a, regionState=OPENING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:28:44,014 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=158, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=77f5f1977f6b57fce43d928c1679745a, ASSIGN because future has completed 2024-11-15T08:28:44,014 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=158, state=RUNNABLE, hasLock=false; OpenRegionProcedure 77f5f1977f6b57fce43d928c1679745a, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:28:44,015 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=157, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c2712fa8b72f4baa09dd8489a16bd07e, ASSIGN because future has completed 2024-11-15T08:28:44,015 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=161, ppid=159, state=RUNNABLE, hasLock=false; OpenRegionProcedure c2712fa8b72f4baa09dd8489a16bd07e, server=48e8ad8dc936,37511,1731659175104}] 2024-11-15T08:28:44,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-15T08:28:44,169 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. 2024-11-15T08:28:44,169 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7752): Opening region: {ENCODED => 77f5f1977f6b57fce43d928c1679745a, NAME => 'testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T08:28:44,169 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. service=AccessControlService 2024-11-15T08:28:44,169 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T08:28:44,169 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:44,170 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:44,170 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7794): checking encryption for 77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:44,170 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(7797): checking classloading for 77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:44,170 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. 2024-11-15T08:28:44,170 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7752): Opening region: {ENCODED => c2712fa8b72f4baa09dd8489a16bd07e, NAME => 'testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T08:28:44,171 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. service=AccessControlService 2024-11-15T08:28:44,171 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T08:28:44,171 INFO [StoreOpener-77f5f1977f6b57fce43d928c1679745a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:44,171 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:44,171 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:44,171 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7794): checking encryption for c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:44,171 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7797): checking classloading for c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:44,172 INFO [StoreOpener-c2712fa8b72f4baa09dd8489a16bd07e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:44,172 INFO [StoreOpener-77f5f1977f6b57fce43d928c1679745a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 77f5f1977f6b57fce43d928c1679745a columnFamilyName cf 2024-11-15T08:28:44,172 DEBUG [StoreOpener-77f5f1977f6b57fce43d928c1679745a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:44,173 INFO [StoreOpener-77f5f1977f6b57fce43d928c1679745a-1 {}] regionserver.HStore(327): Store=77f5f1977f6b57fce43d928c1679745a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:28:44,173 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1038): replaying wal for 77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:44,173 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:44,174 INFO [StoreOpener-c2712fa8b72f4baa09dd8489a16bd07e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c2712fa8b72f4baa09dd8489a16bd07e columnFamilyName cf 2024-11-15T08:28:44,174 DEBUG [StoreOpener-c2712fa8b72f4baa09dd8489a16bd07e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:44,174 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:44,174 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1048): stopping wal replay for 77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:44,174 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1060): Cleaning up temporary data for 77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:44,174 INFO [StoreOpener-c2712fa8b72f4baa09dd8489a16bd07e-1 {}] regionserver.HStore(327): Store=c2712fa8b72f4baa09dd8489a16bd07e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:28:44,174 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1038): replaying wal for c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:44,175 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:44,175 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:44,176 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1093): writing seq id for 77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:44,176 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1048): stopping wal replay for c2712fa8b72f4baa09dd8489a16bd07e 
2024-11-15T08:28:44,176 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1060): Cleaning up temporary data for c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:44,177 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1093): writing seq id for c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:44,177 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:28:44,178 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1114): Opened 77f5f1977f6b57fce43d928c1679745a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66231570, jitterRate=-0.013072699308395386}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:28:44,178 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:44,178 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegion(1006): Region open journal for 77f5f1977f6b57fce43d928c1679745a: Running coprocessor pre-open hook at 1731659324170Writing region info on filesystem at 1731659324170Initializing all the Stores at 1731659324170Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659324171 (+1 ms)Cleaning up temporary data from old regions at 1731659324174 (+3 ms)Running coprocessor post-open hooks at 1731659324178 (+4 ms)Region opened successfully at 1731659324178 2024-11-15T08:28:44,179 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:28:44,179 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a., pid=160, masterSystemTime=1731659324166 2024-11-15T08:28:44,179 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1114): Opened c2712fa8b72f4baa09dd8489a16bd07e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69954852, jitterRate=0.042408525943756104}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:28:44,179 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] 
regionserver.HRegion(1122): Running coprocessor post-open hooks for c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:44,179 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1006): Region open journal for c2712fa8b72f4baa09dd8489a16bd07e: Running coprocessor pre-open hook at 1731659324171Writing region info on filesystem at 1731659324171Initializing all the Stores at 1731659324172 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659324172Cleaning up temporary data from old regions at 1731659324176 (+4 ms)Running coprocessor post-open hooks at 1731659324179 (+3 ms)Region opened successfully at 1731659324179 2024-11-15T08:28:44,180 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e., pid=161, masterSystemTime=1731659324167 2024-11-15T08:28:44,181 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. 2024-11-15T08:28:44,181 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=160}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. 2024-11-15T08:28:44,182 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=158 updating hbase:meta row=77f5f1977f6b57fce43d928c1679745a, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:28:44,182 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. 2024-11-15T08:28:44,182 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. 
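At this point both regions are OPEN and their locations have been written back to hbase:meta. A small sketch of how a client could observe those assignments, assuming the standard RegionLocator API; the table name is from the log, the class name is illustrative:

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionLocations {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(table)) {
      // Each location pairs an encoded region name with its hosting region server,
      // mirroring the regionState=OPEN rows written to hbase:meta above.
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}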
2024-11-15T08:28:44,182 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=c2712fa8b72f4baa09dd8489a16bd07e, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:28:44,184 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=158, state=RUNNABLE, hasLock=false; OpenRegionProcedure 77f5f1977f6b57fce43d928c1679745a, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:28:44,184 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=161, ppid=159, state=RUNNABLE, hasLock=false; OpenRegionProcedure c2712fa8b72f4baa09dd8489a16bd07e, server=48e8ad8dc936,37511,1731659175104 because future has completed 2024-11-15T08:28:44,186 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=158 2024-11-15T08:28:44,186 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=158, state=SUCCESS, hasLock=false; OpenRegionProcedure 77f5f1977f6b57fce43d928c1679745a, server=48e8ad8dc936,40637,1731659175269 in 170 msec 2024-11-15T08:28:44,187 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=161, resume processing ppid=159 2024-11-15T08:28:44,187 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, ppid=159, state=SUCCESS, hasLock=false; OpenRegionProcedure c2712fa8b72f4baa09dd8489a16bd07e, server=48e8ad8dc936,37511,1731659175104 in 170 msec 2024-11-15T08:28:44,188 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=77f5f1977f6b57fce43d928c1679745a, ASSIGN in 327 msec 2024-11-15T08:28:44,189 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=157 2024-11-15T08:28:44,189 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=157, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c2712fa8b72f4baa09dd8489a16bd07e, ASSIGN in 328 msec 2024-11-15T08:28:44,190 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T08:28:44,190 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659324190"}]},"ts":"1731659324190"} 2024-11-15T08:28:44,191 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-11-15T08:28:44,192 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=157, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T08:28:44,192 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-11-15T08:28:44,195 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-15T08:28:44,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:44,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:44,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:44,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:44,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:44,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:44,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:44,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:44,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:44,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:44,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:44,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:44,201 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 373 msec 2024-11-15T08:28:44,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-11-15T08:28:44,456 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-15T08:28:44,456 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. Timeout = 60000ms 2024-11-15T08:28:44,456 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:28:44,460 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-11-15T08:28:44,460 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:28:44,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-11-15T08:28:44,461 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T08:28:44,463 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-15T08:28:44,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659324463 (current time:1731659324463). 
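The snapshot request logged here ({ ss=emptySnaptb0-testEmptyExportFileSystemState ... type=FLUSH ttl=0 }) corresponds to a client-side Admin.snapshot call. A minimal sketch, assuming the HBase 2.x/3.x Admin API with SnapshotType.FLUSH; snapshot and table names are taken from the log, the rest is illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeEmptySnapshot {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure (SNAPSHOT_PREPARE through
      // SNAPSHOT_POST_OPERATION, traced below) has finished.
      admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState", table, SnapshotType.FLUSH);
    }
  }
}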
2024-11-15T08:28:44,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:28:44,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-15T08:28:44,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:28:44,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11e1441b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:44,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:44,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:44,465 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:44,465 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:44,465 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:44,465 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58c2f298, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:44,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:44,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:44,466 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:44,467 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43256, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:44,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6148deed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:44,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:44,468 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:44,468 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:44,469 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53380, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:28:44,470 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 2024-11-15T08:28:44,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:44,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:44,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:44,470 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T08:28:44,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5095b190, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:44,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:44,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:44,472 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:44,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:44,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:44,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d138ed4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:44,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:44,472 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:44,473 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:44,473 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43272, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:44,474 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4440743, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:44,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:44,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:44,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:44,476 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53382, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T08:28:44,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:44,478 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169. 2024-11-15T08:28:44,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor239.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:44,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:44,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:44,479 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T08:28:44,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-15T08:28:44,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T08:28:44,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-15T08:28:44,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 162 2024-11-15T08:28:44,481 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:28:44,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-15T08:28:44,482 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:28:44,484 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:28:44,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742169_1345 (size=185) 2024-11-15T08:28:44,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742169_1345 (size=185) 2024-11-15T08:28:44,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742169_1345 (size=185) 2024-11-15T08:28:44,491 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:28:44,491 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 77f5f1977f6b57fce43d928c1679745a}, {pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c2712fa8b72f4baa09dd8489a16bd07e}] 
2024-11-15T08:28:44,492 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:44,492 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:44,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-15T08:28:44,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40637 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=163 2024-11-15T08:28:44,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=164 2024-11-15T08:28:44,644 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. 2024-11-15T08:28:44,644 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. 2024-11-15T08:28:44,644 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.HRegion(2603): Flush status journal for c2712fa8b72f4baa09dd8489a16bd07e: 2024-11-15T08:28:44,644 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.HRegion(2603): Flush status journal for 77f5f1977f6b57fce43d928c1679745a: 2024-11-15T08:28:44,644 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-15T08:28:44,644 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-15T08:28:44,644 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:44,644 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:44,644 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:44,644 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:44,645 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T08:28:44,645 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T08:28:44,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742170_1346 (size=76) 2024-11-15T08:28:44,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742171_1347 (size=76) 2024-11-15T08:28:44,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742170_1346 (size=76) 2024-11-15T08:28:44,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742170_1346 (size=76) 2024-11-15T08:28:44,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742171_1347 (size=76) 2024-11-15T08:28:44,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742171_1347 (size=76) 2024-11-15T08:28:44,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. 2024-11-15T08:28:44,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=163}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=163 2024-11-15T08:28:44,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. 
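Once the per-region SnapshotRegionProcedure callables above finish and the procedure reaches SNAPSHOT_COMPLETE_SNAPSHOT (moving the manifest out of .hbase-snapshot/.tmp, below), the snapshot becomes visible to clients. A hedged sketch of confirming that from the client side, assuming the standard Admin.listSnapshots API; the class name is illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshots {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Completed snapshots, e.g. emptySnaptb0-testEmptyExportFileSystemState once the
      // manifest has been promoted out of the .tmp directory.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName() + " on " + sd.getTableName());
      }
    }
  }
}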
2024-11-15T08:28:44,652 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-15T08:28:44,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=164 2024-11-15T08:28:44,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=163 2024-11-15T08:28:44,653 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:44,653 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:44,653 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=163, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:44,653 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=162, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:44,655 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=162, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c2712fa8b72f4baa09dd8489a16bd07e in 163 msec 2024-11-15T08:28:44,656 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-11-15T08:28:44,656 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 77f5f1977f6b57fce43d928c1679745a in 163 msec 2024-11-15T08:28:44,656 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:28:44,657 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:28:44,658 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:28:44,658 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:44,659 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:44,667 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742172_1348 (size=567) 2024-11-15T08:28:44,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742172_1348 (size=567) 2024-11-15T08:28:44,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742172_1348 (size=567) 2024-11-15T08:28:44,670 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:28:44,674 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:28:44,674 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:44,675 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=162, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:28:44,675 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 162 2024-11-15T08:28:44,677 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=162, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 195 msec 2024-11-15T08:28:44,710 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-15T08:28:44,710 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-15T08:28:44,711 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-15T08:28:44,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-11-15T08:28:44,796 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-15T08:28:44,800 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='0a71c71687d43b9345aeee696534b456e', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:44,801 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='1e213f43b11ea483a643a03666ec412af', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:28:44,802 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='2f462cbe70ae7df8876002bc8d0731f21', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:28:44,802 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='378df5d6a61e98a85dd5814e404866fc8', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:28:44,803 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='4392bee3fbbe1dfbc9fc94cf2971f5151', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:28:44,807 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40637 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:28:44,808 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37511 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:28:44,809 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T08:28:44,811 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-15T08:28:44,811 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. 
2024-11-15T08:28:44,812 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:28:44,813 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T08:28:44,817 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T08:28:44,822 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-15T08:28:44,825 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-15T08:28:44,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659324825 (current time:1731659324825). 2024-11-15T08:28:44,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:28:44,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-15T08:28:44,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:28:44,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cfd580e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:44,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:44,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:44,826 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:44,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:44,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:44,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@452e4cc4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:44,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:44,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:44,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:44,828 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43286, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:44,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f05bb55, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:44,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:44,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:44,829 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:44,830 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53398, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:28:44,831 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169. 
2024-11-15T08:28:44,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:44,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:44,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:44,831 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:28:44,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1de2aa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:44,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:44,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:44,833 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:44,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:44,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:44,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dedb08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:44,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:44,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:44,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:44,834 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43308, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:44,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69bba228, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:44,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:44,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:44,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:44,837 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53408, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:28:44,839 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:44,840 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169. 
2024-11-15T08:28:44,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor239.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:44,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:44,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:44,840 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:28:44,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-15T08:28:44,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-15T08:28:44,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=165, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-15T08:28:44,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 165 2024-11-15T08:28:44,843 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:28:44,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-15T08:28:44,844 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:28:44,846 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:28:44,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742173_1349 (size=180) 2024-11-15T08:28:44,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742173_1349 (size=180) 2024-11-15T08:28:44,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742173_1349 (size=180) 2024-11-15T08:28:44,860 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:28:44,860 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 77f5f1977f6b57fce43d928c1679745a}, {pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c2712fa8b72f4baa09dd8489a16bd07e}] 2024-11-15T08:28:44,861 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:44,861 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:44,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-15T08:28:45,013 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40637 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=166 2024-11-15T08:28:45,013 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=167 2024-11-15T08:28:45,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. 2024-11-15T08:28:45,014 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. 2024-11-15T08:28:45,014 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(2902): Flushing 77f5f1977f6b57fce43d928c1679745a 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-15T08:28:45,014 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(2902): Flushing c2712fa8b72f4baa09dd8489a16bd07e 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-15T08:28:45,036 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e/.tmp/cf/43edb3b7557a4658a254c7184b087fdb is 71, key is 1bab0f0e06297fd2c94e5b6407c12318/cf:q/1731659324808/Put/seqid=0 2024-11-15T08:28:45,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a/.tmp/cf/a48b3ffaf1cd4336a324718dae0c3977 is 71, key is 06653f50610c7f0cd9e8b6113cdd5c73/cf:q/1731659324807/Put/seqid=0 2024-11-15T08:28:45,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742174_1350 (size=8256) 2024-11-15T08:28:45,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742174_1350 (size=8256) 2024-11-15T08:28:45,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742174_1350 (size=8256) 2024-11-15T08:28:45,052 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e/.tmp/cf/43edb3b7557a4658a254c7184b087fdb 2024-11-15T08:28:45,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742175_1351 (size=5354) 2024-11-15T08:28:45,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742175_1351 (size=5354) 2024-11-15T08:28:45,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742175_1351 (size=5354) 2024-11-15T08:28:45,066 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e/.tmp/cf/43edb3b7557a4658a254c7184b087fdb as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e/cf/43edb3b7557a4658a254c7184b087fdb 2024-11-15T08:28:45,067 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a/.tmp/cf/a48b3ffaf1cd4336a324718dae0c3977 2024-11-15T08:28:45,072 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e/cf/43edb3b7557a4658a254c7184b087fdb, entries=46, sequenceid=6, filesize=8.1 K 2024-11-15T08:28:45,074 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a/.tmp/cf/a48b3ffaf1cd4336a324718dae0c3977 as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a/cf/a48b3ffaf1cd4336a324718dae0c3977 2024-11-15T08:28:45,078 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for c2712fa8b72f4baa09dd8489a16bd07e in 64ms, sequenceid=6, compaction requested=false 2024-11-15T08:28:45,078 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-11-15T08:28:45,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.HRegion(2603): Flush status journal for 
c2712fa8b72f4baa09dd8489a16bd07e: 2024-11-15T08:28:45,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-15T08:28:45,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:45,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:45,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e/cf/43edb3b7557a4658a254c7184b087fdb] hfiles 2024-11-15T08:28:45,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e/cf/43edb3b7557a4658a254c7184b087fdb for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:45,079 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a/cf/a48b3ffaf1cd4336a324718dae0c3977, entries=4, sequenceid=6, filesize=5.2 K 2024-11-15T08:28:45,081 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 77f5f1977f6b57fce43d928c1679745a in 67ms, sequenceid=6, compaction requested=false 2024-11-15T08:28:45,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.HRegion(2603): Flush status journal for 77f5f1977f6b57fce43d928c1679745a: 2024-11-15T08:28:45,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-15T08:28:45,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:45,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:45,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a/cf/a48b3ffaf1cd4336a324718dae0c3977] hfiles 2024-11-15T08:28:45,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a/cf/a48b3ffaf1cd4336a324718dae0c3977 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:45,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742176_1352 (size=115) 2024-11-15T08:28:45,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742176_1352 (size=115) 2024-11-15T08:28:45,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742176_1352 (size=115) 2024-11-15T08:28:45,093 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. 2024-11-15T08:28:45,093 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=167}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=167 2024-11-15T08:28:45,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742177_1353 (size=115) 2024-11-15T08:28:45,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=167 2024-11-15T08:28:45,094 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:45,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742177_1353 (size=115) 2024-11-15T08:28:45,094 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=167, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:45,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742177_1353 (size=115) 2024-11-15T08:28:45,095 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. 
2024-11-15T08:28:45,095 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-15T08:28:45,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=166 2024-11-15T08:28:45,095 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:45,096 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=166, ppid=165, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:45,097 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=165, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c2712fa8b72f4baa09dd8489a16bd07e in 235 msec 2024-11-15T08:28:45,099 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=165 2024-11-15T08:28:45,099 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=165, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 77f5f1977f6b57fce43d928c1679745a in 237 msec 2024-11-15T08:28:45,099 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:28:45,100 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:28:45,101 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:28:45,101 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:45,101 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:45,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742178_1354 (size=645) 2024-11-15T08:28:45,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742178_1354 (size=645) 2024-11-15T08:28:45,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742178_1354 (size=645) 2024-11-15T08:28:45,112 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:28:45,117 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:28:45,118 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:45,119 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=165, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:28:45,119 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 165 2024-11-15T08:28:45,120 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=165, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 278 msec 2024-11-15T08:28:45,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-15T08:28:45,157 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-15T08:28:45,157 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659325157 2024-11-15T08:28:45,157 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:44023, tgtDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659325157, rawTgtDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659325157, srcFsUri=hdfs://localhost:44023, srcDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:45,203 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44023, inputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:45,203 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]], 
outputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659325157, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659325157/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:45,206 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-15T08:28:45,218 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659325157/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:45,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742179_1355 (size=185) 2024-11-15T08:28:45,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742179_1355 (size=185) 2024-11-15T08:28:45,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742179_1355 (size=185) 2024-11-15T08:28:45,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742180_1356 (size=567) 2024-11-15T08:28:45,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742180_1356 (size=567) 2024-11-15T08:28:45,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742180_1356 (size=567) 2024-11-15T08:28:45,248 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:45,249 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:45,249 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:46,347 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-5643103038617623442.jar 2024-11-15T08:28:46,348 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:46,348 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:46,433 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-4455259412992796499.jar 2024-11-15T08:28:46,434 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:46,434 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:46,434 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:46,435 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:46,435 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:46,435 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:46,435 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T08:28:46,436 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T08:28:46,436 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T08:28:46,436 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 
2024-11-15T08:28:46,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T08:28:46,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T08:28:46,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T08:28:46,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-15T08:28:46,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T08:28:46,438 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T08:28:46,438 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T08:28:46,438 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:28:46,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:28:46,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:28:46,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:28:46,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-15T08:28:46,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:28:46,440 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:28:46,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742181_1357 (size=131440) 2024-11-15T08:28:46,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742181_1357 (size=131440) 2024-11-15T08:28:46,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742181_1357 (size=131440) 2024-11-15T08:28:46,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742182_1358 (size=4188619) 2024-11-15T08:28:46,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742182_1358 (size=4188619) 2024-11-15T08:28:46,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742182_1358 (size=4188619) 2024-11-15T08:28:46,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742183_1359 (size=1323991) 2024-11-15T08:28:46,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742183_1359 (size=1323991) 2024-11-15T08:28:46,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742183_1359 (size=1323991) 2024-11-15T08:28:46,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742184_1360 (size=903738) 2024-11-15T08:28:46,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742184_1360 (size=903738) 2024-11-15T08:28:46,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742184_1360 (size=903738) 2024-11-15T08:28:46,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742185_1361 (size=8360083) 2024-11-15T08:28:46,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742185_1361 (size=8360083) 2024-11-15T08:28:46,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742185_1361 (size=8360083) 2024-11-15T08:28:46,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742186_1362 (size=1877034) 
2024-11-15T08:28:46,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742186_1362 (size=1877034) 2024-11-15T08:28:46,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742186_1362 (size=1877034) 2024-11-15T08:28:46,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742187_1363 (size=77835) 2024-11-15T08:28:46,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742187_1363 (size=77835) 2024-11-15T08:28:46,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742187_1363 (size=77835) 2024-11-15T08:28:46,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742188_1364 (size=30949) 2024-11-15T08:28:46,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742188_1364 (size=30949) 2024-11-15T08:28:46,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742188_1364 (size=30949) 2024-11-15T08:28:46,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742189_1365 (size=1597327) 2024-11-15T08:28:46,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742189_1365 (size=1597327) 2024-11-15T08:28:46,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742189_1365 (size=1597327) 2024-11-15T08:28:46,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742190_1366 (size=4695811) 2024-11-15T08:28:46,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742190_1366 (size=4695811) 2024-11-15T08:28:46,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742190_1366 (size=4695811) 2024-11-15T08:28:46,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742191_1367 (size=6424751) 2024-11-15T08:28:46,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742191_1367 (size=6424751) 2024-11-15T08:28:46,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742191_1367 (size=6424751) 2024-11-15T08:28:46,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742192_1368 (size=232957) 2024-11-15T08:28:46,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742192_1368 (size=232957) 2024-11-15T08:28:46,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742192_1368 
(size=232957) 2024-11-15T08:28:46,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742193_1369 (size=127628) 2024-11-15T08:28:46,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742193_1369 (size=127628) 2024-11-15T08:28:46,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742193_1369 (size=127628) 2024-11-15T08:28:46,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742194_1370 (size=20406) 2024-11-15T08:28:46,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742194_1370 (size=20406) 2024-11-15T08:28:46,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742194_1370 (size=20406) 2024-11-15T08:28:46,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742195_1371 (size=5175431) 2024-11-15T08:28:46,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742195_1371 (size=5175431) 2024-11-15T08:28:46,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742195_1371 (size=5175431) 2024-11-15T08:28:46,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742196_1372 (size=440656) 2024-11-15T08:28:46,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742196_1372 (size=440656) 2024-11-15T08:28:46,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742196_1372 (size=440656) 2024-11-15T08:28:46,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742197_1373 (size=217634) 2024-11-15T08:28:46,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742197_1373 (size=217634) 2024-11-15T08:28:46,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742197_1373 (size=217634) 2024-11-15T08:28:46,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742198_1374 (size=1832290) 2024-11-15T08:28:46,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742198_1374 (size=1832290) 2024-11-15T08:28:46,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742198_1374 (size=1832290) 2024-11-15T08:28:46,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742199_1375 (size=322274) 2024-11-15T08:28:46,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to 
blk_1073742199_1375 (size=322274) 2024-11-15T08:28:46,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742199_1375 (size=322274) 2024-11-15T08:28:46,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742200_1376 (size=503880) 2024-11-15T08:28:46,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742200_1376 (size=503880) 2024-11-15T08:28:46,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742200_1376 (size=503880) 2024-11-15T08:28:46,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742201_1377 (size=29229) 2024-11-15T08:28:46,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742201_1377 (size=29229) 2024-11-15T08:28:46,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742201_1377 (size=29229) 2024-11-15T08:28:46,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742202_1378 (size=24096) 2024-11-15T08:28:46,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742202_1378 (size=24096) 2024-11-15T08:28:46,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742202_1378 (size=24096) 2024-11-15T08:28:46,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742203_1379 (size=111872) 2024-11-15T08:28:46,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742203_1379 (size=111872) 2024-11-15T08:28:46,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742203_1379 (size=111872) 2024-11-15T08:28:46,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742204_1380 (size=45609) 2024-11-15T08:28:46,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742204_1380 (size=45609) 2024-11-15T08:28:46,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742204_1380 (size=45609) 2024-11-15T08:28:46,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742205_1381 (size=136454) 2024-11-15T08:28:46,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742205_1381 (size=136454) 2024-11-15T08:28:46,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742205_1381 (size=136454) 2024-11-15T08:28:46,885 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. 
See Job or Job#setJar(String). 2024-11-15T08:28:46,887 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-11-15T08:28:46,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742206_1382 (size=7) 2024-11-15T08:28:46,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742206_1382 (size=7) 2024-11-15T08:28:46,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742206_1382 (size=7) 2024-11-15T08:28:46,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742207_1383 (size=10) 2024-11-15T08:28:46,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742207_1383 (size=10) 2024-11-15T08:28:46,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742207_1383 (size=10) 2024-11-15T08:28:46,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742208_1384 (size=303635) 2024-11-15T08:28:46,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742208_1384 (size=303635) 2024-11-15T08:28:46,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742208_1384 (size=303635) 2024-11-15T08:28:46,940 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T08:28:46,940 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T08:28:47,281 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0007_000001 (auth:SIMPLE) from 127.0.0.1:58056 2024-11-15T08:28:49,059 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T08:28:53,612 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0007_000001 (auth:SIMPLE) from 127.0.0.1:32884 2024-11-15T08:28:54,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742209_1385 (size=349261) 2024-11-15T08:28:54,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742209_1385 (size=349261) 2024-11-15T08:28:54,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742209_1385 (size=349261) 2024-11-15T08:28:54,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742210_1386 (size=8568) 2024-11-15T08:28:54,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742210_1386 (size=8568) 2024-11-15T08:28:54,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742210_1386 (size=8568) 2024-11-15T08:28:54,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742211_1387 (size=460) 2024-11-15T08:28:54,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742211_1387 (size=460) 2024-11-15T08:28:54,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742211_1387 (size=460) 2024-11-15T08:28:54,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742212_1388 (size=8568) 2024-11-15T08:28:54,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742212_1388 (size=8568) 2024-11-15T08:28:54,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742212_1388 (size=8568) 2024-11-15T08:28:54,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742213_1389 (size=349261) 2024-11-15T08:28:54,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742213_1389 (size=349261) 2024-11-15T08:28:54,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742213_1389 (size=349261) 2024-11-15T08:28:56,175 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-15T08:28:56,176 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
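Not part of the captured log: the export traced above (snapshot.ExportSnapshot 661/1210/1221) runs as a MapReduce job that copies the snapshot manifest and hfile references to a target filesystem, then verifies the result. A minimal sketch of driving the same tool programmatically, assuming ExportSnapshot can be run through ToolRunner and using the long option names from the HBase reference guide; the destination URI is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly equivalent to:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     --snapshot emptySnaptb0-testEmptyExportFileSystemState \
        //     --copy-to hdfs://backup:8020/hbase
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "--snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
            "--copy-to", "hdfs://backup:8020/hbase"   // hypothetical destination
        });
        System.exit(rc);
      }
    }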
2024-11-15T08:28:56,194 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:56,194 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-15T08:28:56,195 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-15T08:28:56,195 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:56,196 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-15T08:28:56,196 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-15T08:28:56,196 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659325157/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659325157/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:56,196 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659325157/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-15T08:28:56,196 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659325157/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-15T08:28:56,213 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-15T08:28:56,218 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659336218"}]},"ts":"1731659336218"} 2024-11-15T08:28:56,221 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-11-15T08:28:56,221 INFO [PEWorker-3 {}] 
procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-11-15T08:28:56,223 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=169, ppid=168, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-11-15T08:28:56,231 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=77f5f1977f6b57fce43d928c1679745a, UNASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c2712fa8b72f4baa09dd8489a16bd07e, UNASSIGN}] 2024-11-15T08:28:56,232 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c2712fa8b72f4baa09dd8489a16bd07e, UNASSIGN 2024-11-15T08:28:56,233 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=77f5f1977f6b57fce43d928c1679745a, UNASSIGN 2024-11-15T08:28:56,234 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=c2712fa8b72f4baa09dd8489a16bd07e, regionState=CLOSING, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:28:56,235 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=77f5f1977f6b57fce43d928c1679745a, regionState=CLOSING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:28:56,238 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c2712fa8b72f4baa09dd8489a16bd07e, UNASSIGN because future has completed 2024-11-15T08:28:56,238 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:28:56,238 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; CloseRegionProcedure c2712fa8b72f4baa09dd8489a16bd07e, server=48e8ad8dc936,37511,1731659175104}] 2024-11-15T08:28:56,239 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=77f5f1977f6b57fce43d928c1679745a, UNASSIGN because future has completed 2024-11-15T08:28:56,240 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:28:56,240 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; CloseRegionProcedure 77f5f1977f6b57fce43d928c1679745a, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:28:56,326 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-15T08:28:56,392 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(122): Close c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:56,392 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:28:56,393 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1722): Closing c2712fa8b72f4baa09dd8489a16bd07e, disabling compactions & flushes 2024-11-15T08:28:56,393 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. 2024-11-15T08:28:56,393 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. 2024-11-15T08:28:56,393 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. after waiting 0 ms 2024-11-15T08:28:56,393 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. 2024-11-15T08:28:56,394 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(122): Close 77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:56,394 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:28:56,394 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1722): Closing 77f5f1977f6b57fce43d928c1679745a, disabling compactions & flushes 2024-11-15T08:28:56,394 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. 2024-11-15T08:28:56,394 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. 2024-11-15T08:28:56,394 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. after waiting 0 ms 2024-11-15T08:28:56,394 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. 
2024-11-15T08:28:56,405 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T08:28:56,406 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:28:56,406 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e. 2024-11-15T08:28:56,406 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1676): Region close journal for c2712fa8b72f4baa09dd8489a16bd07e: Waiting for close lock at 1731659336392Running coprocessor pre-close hooks at 1731659336393 (+1 ms)Disabling compacts and flushes for region at 1731659336393Disabling writes for close at 1731659336393Writing region close event to WAL at 1731659336396 (+3 ms)Running coprocessor post-close hooks at 1731659336406 (+10 ms)Closed at 1731659336406 2024-11-15T08:28:56,408 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T08:28:56,409 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(157): Closed c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:56,409 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:28:56,409 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=c2712fa8b72f4baa09dd8489a16bd07e, regionState=CLOSED 2024-11-15T08:28:56,409 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a. 
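Not part of the captured log: the disable sequence traced above (pid=168 DisableTableProcedure, pid=169 CloseTableRegionsProcedure, pids 170-173 region unassign/close) is what the master runs when a client disables the table. A minimal sketch of that client call, assuming the standard HBase 2.x Admin API:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
          // Submits a DisableTableProcedure on the master, which unassigns and closes
          // every region of the table before marking it DISABLED in hbase:meta.
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table);
          }
        }
      }
    }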
2024-11-15T08:28:56,409 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] regionserver.HRegion(1676): Region close journal for 77f5f1977f6b57fce43d928c1679745a: Waiting for close lock at 1731659336394Running coprocessor pre-close hooks at 1731659336394Disabling compacts and flushes for region at 1731659336394Disabling writes for close at 1731659336394Writing region close event to WAL at 1731659336403 (+9 ms)Running coprocessor post-close hooks at 1731659336409 (+6 ms)Closed at 1731659336409 2024-11-15T08:28:56,412 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; CloseRegionProcedure c2712fa8b72f4baa09dd8489a16bd07e, server=48e8ad8dc936,37511,1731659175104 because future has completed 2024-11-15T08:28:56,413 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=173}] handler.UnassignRegionHandler(157): Closed 77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:56,413 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=77f5f1977f6b57fce43d928c1679745a, regionState=CLOSED 2024-11-15T08:28:56,414 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; CloseRegionProcedure 77f5f1977f6b57fce43d928c1679745a, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:28:56,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-11-15T08:28:56,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; CloseRegionProcedure c2712fa8b72f4baa09dd8489a16bd07e, server=48e8ad8dc936,37511,1731659175104 in 175 msec 2024-11-15T08:28:56,418 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-11-15T08:28:56,418 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; CloseRegionProcedure 77f5f1977f6b57fce43d928c1679745a, server=48e8ad8dc936,40637,1731659175269 in 175 msec 2024-11-15T08:28:56,418 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=c2712fa8b72f4baa09dd8489a16bd07e, UNASSIGN in 185 msec 2024-11-15T08:28:56,420 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=170, resume processing ppid=169 2024-11-15T08:28:56,420 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=77f5f1977f6b57fce43d928c1679745a, UNASSIGN in 187 msec 2024-11-15T08:28:56,422 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=169, resume processing ppid=168 2024-11-15T08:28:56,423 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, ppid=168, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 198 msec 2024-11-15T08:28:56,424 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659336424"}]},"ts":"1731659336424"} 2024-11-15T08:28:56,426 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-11-15T08:28:56,426 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-11-15T08:28:56,428 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 213 msec 2024-11-15T08:28:56,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-11-15T08:28:56,536 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-15T08:28:56,537 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,539 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=174, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,540 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=174, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,543 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,546 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:56,546 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:56,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,547 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,548 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,549 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-15T08:28:56,549 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-15T08:28:56,549 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-15T08:28:56,549 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a/recovered.edits] 2024-11-15T08:28:56,550 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-15T08:28:56,550 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e/recovered.edits] 2024-11-15T08:28:56,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:56,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:56,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, 
state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:56,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:56,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-15T08:28:56,554 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:56,554 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:56,554 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:56,554 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:56,557 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e/cf/43edb3b7557a4658a254c7184b087fdb to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e/cf/43edb3b7557a4658a254c7184b087fdb 2024-11-15T08:28:56,557 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a/cf/a48b3ffaf1cd4336a324718dae0c3977 to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a/cf/a48b3ffaf1cd4336a324718dae0c3977 2024-11-15T08:28:56,562 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e/recovered.edits/9.seqid to 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e/recovered.edits/9.seqid 2024-11-15T08:28:56,562 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/c2712fa8b72f4baa09dd8489a16bd07e 2024-11-15T08:28:56,563 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a/recovered.edits/9.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a/recovered.edits/9.seqid 2024-11-15T08:28:56,563 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testEmptyExportFileSystemState/77f5f1977f6b57fce43d928c1679745a 2024-11-15T08:28:56,563 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-11-15T08:28:56,566 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=174, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,570 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-11-15T08:28:56,574 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-11-15T08:28:56,577 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=174, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,577 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 
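Not part of the captured log: pid=174 above is the DeleteTableProcedure that archives the region directories via HFileArchiver and removes the table from hbase:meta; immediately afterwards the test deletes both snapshots. A minimal sketch of the client-side cleanup, assuming the standard HBase 2.x Admin API:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CleanupSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Drops the (already disabled) table; its region data is moved into the
          // archive directory by HFileArchiver, as in the log above.
          admin.deleteTable(TableName.valueOf("testtb-testEmptyExportFileSystemState"));
          // Removes the snapshot manifests kept under .hbase-snapshot.
          admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
          admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
        }
      }
    }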
2024-11-15T08:28:56,577 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659336577"}]},"ts":"9223372036854775807"} 2024-11-15T08:28:56,577 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659336577"}]},"ts":"9223372036854775807"} 2024-11-15T08:28:56,581 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T08:28:56,581 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 77f5f1977f6b57fce43d928c1679745a, NAME => 'testtb-testEmptyExportFileSystemState,,1731659323827.77f5f1977f6b57fce43d928c1679745a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => c2712fa8b72f4baa09dd8489a16bd07e, NAME => 'testtb-testEmptyExportFileSystemState,1,1731659323827.c2712fa8b72f4baa09dd8489a16bd07e.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T08:28:56,581 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 2024-11-15T08:28:56,582 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731659336582"}]},"ts":"9223372036854775807"} 2024-11-15T08:28:56,584 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-11-15T08:28:56,585 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=174, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,587 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 48 msec 2024-11-15T08:28:56,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-11-15T08:28:56,656 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-11-15T08:28:56,656 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-15T08:28:56,663 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-15T08:28:56,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:56,666 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-15T08:28:56,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-11-15T08:28:56,697 INFO [Time-limited 
test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=805 (was 792) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1239495487_1 at /127.0.0.1:58050 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5194 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2123625332_22 at /127.0.0.1:34602 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46083 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2123625332_22 at /127.0.0.1:51182 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2123625332_22 at /127.0.0.1:58078 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1239495487_1 at /127.0.0.1:34580 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:43321 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:45533 from appattempt_1731659181416_0007_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:46083 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: process reaper (pid 21734) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=817 (was 793) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=433 (was 365) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 13) - ProcessCount LEAK? -, AvailableMemoryMB=2055 (was 1443) - AvailableMemoryMB LEAK? - 2024-11-15T08:28:56,697 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-11-15T08:28:56,720 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=805, OpenFileDescriptor=817, MaxFileDescriptor=1048576, SystemLoadAverage=433, ProcessCount=17, AvailableMemoryMB=2053 2024-11-15T08:28:56,720 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-11-15T08:28:56,722 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T08:28:56,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=175, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-11-15T08:28:56,725 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T08:28:56,725 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:56,725 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 175 2024-11-15T08:28:56,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-15T08:28:56,727 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute 
state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T08:28:56,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742214_1390 (size=404) 2024-11-15T08:28:56,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742214_1390 (size=404) 2024-11-15T08:28:56,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742214_1390 (size=404) 2024-11-15T08:28:56,745 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ce1ae584917574af269eaf0008327bd5, NAME => 'testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:56,757 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 96df807fbed519d5640d78f1262cd1e8, NAME => 'testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:56,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742215_1391 (size=65) 2024-11-15T08:28:56,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742215_1391 (size=65) 2024-11-15T08:28:56,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742215_1391 (size=65) 2024-11-15T08:28:56,789 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:56,789 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing ce1ae584917574af269eaf0008327bd5, disabling compactions & flushes 2024-11-15T08:28:56,789 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. 
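For orientation, the table descriptor logged by HMaster above (REGION_REPLICATION => '1', a single 'cf' family with VERSIONS => '1' and 64 KB blocks, pre-split at '1') corresponds roughly to the Admin API call sketched below. This is illustrative only, not part of the test output; the class name and the assumption that hbase-site.xml points at the cluster under test are mine.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch; mirrors the descriptor in the CREATE request above.
public class CreateChecksumTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
          .setRegionReplication(1)                                   // REGION_REPLICATION => '1'
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                                     // VERSIONS => '1'
              .setBlocksize(64 * 1024)                               // BLOCKSIZE => '65536 B (64KB)'
              .build())
          .build();
      // One split key, '1', giving the two regions created below (''..'1' and '1'..'').
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}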
2024-11-15T08:28:56,789 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. 2024-11-15T08:28:56,789 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. after waiting 0 ms 2024-11-15T08:28:56,789 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. 2024-11-15T08:28:56,789 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. 2024-11-15T08:28:56,789 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for ce1ae584917574af269eaf0008327bd5: Waiting for close lock at 1731659336789Disabling compacts and flushes for region at 1731659336789Disabling writes for close at 1731659336789Writing region close event to WAL at 1731659336789Closed at 1731659336789 2024-11-15T08:28:56,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742216_1392 (size=65) 2024-11-15T08:28:56,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742216_1392 (size=65) 2024-11-15T08:28:56,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742216_1392 (size=65) 2024-11-15T08:28:56,796 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:56,797 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing 96df807fbed519d5640d78f1262cd1e8, disabling compactions & flushes 2024-11-15T08:28:56,797 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. 2024-11-15T08:28:56,797 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. 2024-11-15T08:28:56,797 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. after waiting 0 ms 2024-11-15T08:28:56,797 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. 2024-11-15T08:28:56,797 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. 
2024-11-15T08:28:56,797 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for 96df807fbed519d5640d78f1262cd1e8: Waiting for close lock at 1731659336797Disabling compacts and flushes for region at 1731659336797Disabling writes for close at 1731659336797Writing region close event to WAL at 1731659336797Closed at 1731659336797 2024-11-15T08:28:56,798 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T08:28:56,798 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731659336798"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659336798"}]},"ts":"1731659336798"} 2024-11-15T08:28:56,798 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1731659336798"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659336798"}]},"ts":"1731659336798"} 2024-11-15T08:28:56,801 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T08:28:56,802 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T08:28:56,802 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659336802"}]},"ts":"1731659336802"} 2024-11-15T08:28:56,804 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-11-15T08:28:56,805 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {48e8ad8dc936=0} racks are {/default-rack=0} 2024-11-15T08:28:56,806 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T08:28:56,806 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T08:28:56,806 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T08:28:56,806 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T08:28:56,806 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T08:28:56,806 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T08:28:56,806 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T08:28:56,806 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T08:28:56,806 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T08:28:56,806 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T08:28:56,806 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ce1ae584917574af269eaf0008327bd5, ASSIGN}, {pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=96df807fbed519d5640d78f1262cd1e8, ASSIGN}] 2024-11-15T08:28:56,807 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=96df807fbed519d5640d78f1262cd1e8, ASSIGN 2024-11-15T08:28:56,807 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ce1ae584917574af269eaf0008327bd5, ASSIGN 2024-11-15T08:28:56,809 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=96df807fbed519d5640d78f1262cd1e8, ASSIGN; state=OFFLINE, location=48e8ad8dc936,34555,1731659175344; forceNewPlan=false, retain=false 2024-11-15T08:28:56,809 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ce1ae584917574af269eaf0008327bd5, ASSIGN; state=OFFLINE, location=48e8ad8dc936,37511,1731659175104; forceNewPlan=false, retain=false 2024-11-15T08:28:56,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-15T08:28:56,959 INFO [48e8ad8dc936:35169 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-15T08:28:56,960 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=176 updating hbase:meta row=ce1ae584917574af269eaf0008327bd5, regionState=OPENING, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:28:56,961 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=177 updating hbase:meta row=96df807fbed519d5640d78f1262cd1e8, regionState=OPENING, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:28:56,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=176, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ce1ae584917574af269eaf0008327bd5, ASSIGN because future has completed 2024-11-15T08:28:56,963 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=176, state=RUNNABLE, hasLock=false; OpenRegionProcedure ce1ae584917574af269eaf0008327bd5, server=48e8ad8dc936,37511,1731659175104}] 2024-11-15T08:28:56,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=177, ppid=175, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=96df807fbed519d5640d78f1262cd1e8, ASSIGN because future has completed 2024-11-15T08:28:56,966 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=179, ppid=177, state=RUNNABLE, hasLock=false; OpenRegionProcedure 96df807fbed519d5640d78f1262cd1e8, server=48e8ad8dc936,34555,1731659175344}] 2024-11-15T08:28:57,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-15T08:28:57,123 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. 2024-11-15T08:28:57,123 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7752): Opening region: {ENCODED => ce1ae584917574af269eaf0008327bd5, NAME => 'testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T08:28:57,123 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. service=AccessControlService 2024-11-15T08:28:57,124 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T08:28:57,124 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum ce1ae584917574af269eaf0008327bd5 2024-11-15T08:28:57,124 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:57,124 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7794): checking encryption for ce1ae584917574af269eaf0008327bd5 2024-11-15T08:28:57,124 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(7797): checking classloading for ce1ae584917574af269eaf0008327bd5 2024-11-15T08:28:57,124 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. 2024-11-15T08:28:57,124 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7752): Opening region: {ENCODED => 96df807fbed519d5640d78f1262cd1e8, NAME => 'testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T08:28:57,125 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. service=AccessControlService 2024-11-15T08:28:57,125 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T08:28:57,125 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:28:57,125 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:28:57,125 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7794): checking encryption for 96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:28:57,125 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(7797): checking classloading for 96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:28:57,125 INFO [StoreOpener-ce1ae584917574af269eaf0008327bd5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ce1ae584917574af269eaf0008327bd5 2024-11-15T08:28:57,127 INFO [StoreOpener-ce1ae584917574af269eaf0008327bd5-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ce1ae584917574af269eaf0008327bd5 columnFamilyName cf 2024-11-15T08:28:57,127 DEBUG [StoreOpener-ce1ae584917574af269eaf0008327bd5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:57,127 INFO [StoreOpener-96df807fbed519d5640d78f1262cd1e8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:28:57,127 INFO [StoreOpener-ce1ae584917574af269eaf0008327bd5-1 {}] regionserver.HStore(327): Store=ce1ae584917574af269eaf0008327bd5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:28:57,128 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1038): replaying wal for ce1ae584917574af269eaf0008327bd5 2024-11-15T08:28:57,128 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5 2024-11-15T08:28:57,129 INFO [StoreOpener-96df807fbed519d5640d78f1262cd1e8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 96df807fbed519d5640d78f1262cd1e8 columnFamilyName cf 2024-11-15T08:28:57,129 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5 2024-11-15T08:28:57,129 DEBUG [StoreOpener-96df807fbed519d5640d78f1262cd1e8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:28:57,129 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1048): stopping wal replay for ce1ae584917574af269eaf0008327bd5 2024-11-15T08:28:57,129 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1060): Cleaning up temporary data for ce1ae584917574af269eaf0008327bd5 2024-11-15T08:28:57,129 INFO [StoreOpener-96df807fbed519d5640d78f1262cd1e8-1 {}] regionserver.HStore(327): Store=96df807fbed519d5640d78f1262cd1e8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:28:57,129 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1038): replaying wal for 96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:28:57,130 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:28:57,130 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:28:57,131 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1048): stopping wal replay for 96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:28:57,131 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1060): Cleaning up temporary data for 96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:28:57,131 DEBUG 
[RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1093): writing seq id for ce1ae584917574af269eaf0008327bd5 2024-11-15T08:28:57,132 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1093): writing seq id for 96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:28:57,133 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:28:57,133 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1114): Opened ce1ae584917574af269eaf0008327bd5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73400032, jitterRate=0.09374570846557617}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:28:57,134 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ce1ae584917574af269eaf0008327bd5 2024-11-15T08:28:57,134 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:28:57,134 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegion(1006): Region open journal for ce1ae584917574af269eaf0008327bd5: Running coprocessor pre-open hook at 1731659337124Writing region info on filesystem at 1731659337124Initializing all the Stores at 1731659337125 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659337125Cleaning up temporary data from old regions at 1731659337129 (+4 ms)Running coprocessor post-open hooks at 1731659337134 (+5 ms)Region opened successfully at 1731659337134 2024-11-15T08:28:57,135 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1114): Opened 96df807fbed519d5640d78f1262cd1e8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62367028, jitterRate=-0.0706588625907898}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:28:57,135 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:28:57,135 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegion(1006): Region open journal for 96df807fbed519d5640d78f1262cd1e8: Running coprocessor pre-open hook at 1731659337125Writing region info on 
filesystem at 1731659337125Initializing all the Stores at 1731659337127 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659337127Cleaning up temporary data from old regions at 1731659337131 (+4 ms)Running coprocessor post-open hooks at 1731659337135 (+4 ms)Region opened successfully at 1731659337135 2024-11-15T08:28:57,136 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8., pid=179, masterSystemTime=1731659337121 2024-11-15T08:28:57,136 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5., pid=178, masterSystemTime=1731659337119 2024-11-15T08:28:57,138 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. 2024-11-15T08:28:57,138 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=178}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. 2024-11-15T08:28:57,138 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=176 updating hbase:meta row=ce1ae584917574af269eaf0008327bd5, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:28:57,139 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. 2024-11-15T08:28:57,139 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=179}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. 
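With both OpenRegionProcedures reported back, ce1ae584917574af269eaf0008327bd5 is live on 48e8ad8dc936,37511 and 96df807fbed519d5640d78f1262cd1e8 on 48e8ad8dc936,34555. A minimal client-side way to confirm such assignments, assuming a reachable cluster configuration (this is not part of the test code), is to walk the RegionLocator:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

// Illustrative helper: prints encoded region name -> hosting region server.
public class PrintRegionLocations {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator =
             conn.getRegionLocator(TableName.valueOf("testtb-testExportWithChecksum"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}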
2024-11-15T08:28:57,140 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=177 updating hbase:meta row=96df807fbed519d5640d78f1262cd1e8, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:28:57,141 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=178, ppid=176, state=RUNNABLE, hasLock=false; OpenRegionProcedure ce1ae584917574af269eaf0008327bd5, server=48e8ad8dc936,37511,1731659175104 because future has completed 2024-11-15T08:28:57,142 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=179, ppid=177, state=RUNNABLE, hasLock=false; OpenRegionProcedure 96df807fbed519d5640d78f1262cd1e8, server=48e8ad8dc936,34555,1731659175344 because future has completed 2024-11-15T08:28:57,144 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=178, resume processing ppid=176 2024-11-15T08:28:57,144 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=176, state=SUCCESS, hasLock=false; OpenRegionProcedure ce1ae584917574af269eaf0008327bd5, server=48e8ad8dc936,37511,1731659175104 in 179 msec 2024-11-15T08:28:57,146 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=179, resume processing ppid=177 2024-11-15T08:28:57,146 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; OpenRegionProcedure 96df807fbed519d5640d78f1262cd1e8, server=48e8ad8dc936,34555,1731659175344 in 178 msec 2024-11-15T08:28:57,146 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=175, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ce1ae584917574af269eaf0008327bd5, ASSIGN in 338 msec 2024-11-15T08:28:57,148 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=177, resume processing ppid=175 2024-11-15T08:28:57,148 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, ppid=175, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=96df807fbed519d5640d78f1262cd1e8, ASSIGN in 340 msec 2024-11-15T08:28:57,149 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T08:28:57,149 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659337149"}]},"ts":"1731659337149"} 2024-11-15T08:28:57,152 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-11-15T08:28:57,153 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=175, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T08:28:57,153 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-11-15T08:28:57,158 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 
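The 'jenkins: RWXCA' entry written to hbase:acl gives the table owner read, write, exec, create and admin rights on the new table. To inspect those permissions from a client one could use AccessControlClient, roughly as sketched here (illustrative class name; note that AccessControlClient's methods declare throws Throwable):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

// Illustrative sketch: lists ACL entries for the table created above.
public class ShowTableAcls {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // Table-name regex; should surface the "jenkins: RWXCA" entry written above.
      for (UserPermission p :
          AccessControlClient.getUserPermissions(conn, "testtb-testExportWithChecksum")) {
        System.out.println(p);
      }
    }
  }
}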
2024-11-15T08:28:57,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:57,165 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:57,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:57,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:28:57,179 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:57,179 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:57,180 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:57,180 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:57,180 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:57,180 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:57,180 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:57,181 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-15T08:28:57,182 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 457 msec 2024-11-15T08:28:57,356 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=175 2024-11-15T08:28:57,357 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-15T08:28:57,357 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithChecksum get assigned. Timeout = 60000ms 2024-11-15T08:28:57,357 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:28:57,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-11-15T08:28:57,362 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:28:57,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithChecksum assigned. 2024-11-15T08:28:57,363 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-15T08:28:57,366 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-15T08:28:57,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659337366 (current time:1731659337366). 
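The snapshot request { ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } is the kind of call a client-side Admin.snapshot produces; a minimal sketch (illustrative, not the test's actual code; cluster configuration assumed):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative sketch of requesting the FLUSH-type snapshot seen in the log.
public class TakeEmptySnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.snapshot("emptySnaptb0-testExportWithChecksum",
          TableName.valueOf("testtb-testExportWithChecksum"));
    }
  }
}

Admin.snapshot blocks until the snapshot is complete, which is consistent with the repeated "Checking to see if procedure is done pid=180" polling that follows.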
2024-11-15T08:28:57,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:28:57,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-15T08:28:57,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:28:57,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28f027b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:57,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:57,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:57,370 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:57,370 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:57,370 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:57,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@409917a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:57,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:57,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:57,371 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:57,372 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33056, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:57,373 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9417dfc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:57,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:57,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:57,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:57,376 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52074, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:28:57,379 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169. 2024-11-15T08:28:57,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:57,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:57,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:57,380 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
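The cluster id negotiated during these connection setups (ba5ab714-38eb-44a8-a856-45e1f3ffcc35) is also visible to ordinary clients; a minimal sketch, assuming a standard client configuration (illustrative class name):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative sketch: should print the same cluster_id the ClusterIdFetcher logs above.
public class PrintClusterId {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      System.out.println(admin.getClusterMetrics().getClusterId());
    }
  }
}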
2024-11-15T08:28:57,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@740051b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:57,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:57,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:57,387 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:57,387 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:57,387 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:57,387 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31ba0f43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:57,387 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:57,387 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:57,388 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:57,390 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33074, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:57,390 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ecb90ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:57,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:57,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:57,395 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:57,396 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52088, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T08:28:57,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:57,402 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169. 2024-11-15T08:28:57,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor239.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:57,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:57,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:57,403 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T08:28:57,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-15T08:28:57,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T08:28:57,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-15T08:28:57,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-11-15T08:28:57,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-15T08:28:57,410 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:28:57,412 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:28:57,420 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:28:57,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742217_1393 (size=161) 2024-11-15T08:28:57,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742217_1393 (size=161) 2024-11-15T08:28:57,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742217_1393 (size=161) 2024-11-15T08:28:57,449 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:28:57,449 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ce1ae584917574af269eaf0008327bd5}, {pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 96df807fbed519d5640d78f1262cd1e8}] 2024-11-15T08:28:57,450 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, 
ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:28:57,451 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ce1ae584917574af269eaf0008327bd5 2024-11-15T08:28:57,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-15T08:28:57,606 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=181 2024-11-15T08:28:57,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. 2024-11-15T08:28:57,606 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34555 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=182 2024-11-15T08:28:57,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. 2024-11-15T08:28:57,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HRegion(2603): Flush status journal for ce1ae584917574af269eaf0008327bd5: 2024-11-15T08:28:57,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. for emptySnaptb0-testExportWithChecksum completed. 2024-11-15T08:28:57,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HRegion(2603): Flush status journal for 96df807fbed519d5640d78f1262cd1e8: 2024-11-15T08:28:57,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. for emptySnaptb0-testExportWithChecksum completed. 2024-11-15T08:28:57,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-15T08:28:57,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:57,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T08:28:57,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-15T08:28:57,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:57,607 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T08:28:57,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742218_1394 (size=68) 2024-11-15T08:28:57,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742218_1394 (size=68) 2024-11-15T08:28:57,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742218_1394 (size=68) 2024-11-15T08:28:57,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. 2024-11-15T08:28:57,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=181 2024-11-15T08:28:57,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=181 2024-11-15T08:28:57,631 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region ce1ae584917574af269eaf0008327bd5 2024-11-15T08:28:57,632 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ce1ae584917574af269eaf0008327bd5 2024-11-15T08:28:57,634 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ce1ae584917574af269eaf0008327bd5 in 184 msec 2024-11-15T08:28:57,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742219_1395 (size=68) 2024-11-15T08:28:57,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742219_1395 (size=68) 2024-11-15T08:28:57,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742219_1395 (size=68) 2024-11-15T08:28:57,645 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. 
2024-11-15T08:28:57,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-11-15T08:28:57,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=182 2024-11-15T08:28:57,646 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:28:57,646 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:28:57,654 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=182, resume processing ppid=180 2024-11-15T08:28:57,655 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 96df807fbed519d5640d78f1262cd1e8 in 198 msec 2024-11-15T08:28:57,655 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:28:57,656 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:28:57,656 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:28:57,656 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-11-15T08:28:57,657 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-11-15T08:28:57,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742220_1396 (size=543) 2024-11-15T08:28:57,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742220_1396 (size=543) 2024-11-15T08:28:57,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742220_1396 (size=543) 2024-11-15T08:28:57,690 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:28:57,704 INFO 
[PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:28:57,705 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-11-15T08:28:57,707 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:28:57,708 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-11-15T08:28:57,709 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 304 msec 2024-11-15T08:28:57,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-15T08:28:57,726 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-15T08:28:57,732 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='0ab9d289638ff6016fad42b1648717fac', locateType=CURRENT is [region=testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:28:57,732 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='1c49809cff94f19695362de6f83c3aadc', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8., hostname=48e8ad8dc936,34555,1731659175344, seqNum=2] 2024-11-15T08:28:57,737 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='2defdc851d819ce7b7ce707032079b753', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8., hostname=48e8ad8dc936,34555,1731659175344, seqNum=2] 2024-11-15T08:28:57,737 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='3b7a681a295c126215c17e431f41632dc', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8., hostname=48e8ad8dc936,34555,1731659175344, seqNum=2] 2024-11-15T08:28:57,738 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37511 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:28:57,738 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='4fb63a8161d1b8a814e97adea9223b6a9', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8., hostname=48e8ad8dc936,34555,1731659175344, seqNum=2] 2024-11-15T08:28:57,741 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34555 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:28:57,742 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-15T08:28:57,745 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-15T08:28:57,745 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. 2024-11-15T08:28:57,746 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:28:57,747 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-15T08:28:57,753 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-15T08:28:57,760 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-15T08:28:57,763 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-15T08:28:57,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659337763 (current time:1731659337763). 
2024-11-15T08:28:57,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:28:57,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-15T08:28:57,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:28:57,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c1cc91f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:57,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:57,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:57,764 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:57,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:57,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:57,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d1b0c4a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:57,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:57,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:57,765 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:57,766 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33094, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:57,767 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11d64973, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:57,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:57,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:57,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:57,769 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52096, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:28:57,770 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169. 2024-11-15T08:28:57,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:57,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:57,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:57,770 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T08:28:57,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c88b26d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:57,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:28:57,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:28:57,778 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:28:57,778 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:28:57,778 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:28:57,778 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7768238d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:57,778 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:28:57,778 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:28:57,779 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:57,780 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33112, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:28:57,781 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a006585, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:28:57,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:28:57,783 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:28:57,783 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:28:57,784 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52106, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T08:28:57,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:28:57,788 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169. 2024-11-15T08:28:57,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor239.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:28:57,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:57,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:28:57,788 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T08:28:57,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-15T08:28:57,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T08:28:57,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=183, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-15T08:28:57,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 183 2024-11-15T08:28:57,792 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:28:57,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-15T08:28:57,793 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:28:57,796 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:28:57,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742221_1397 (size=156) 2024-11-15T08:28:57,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742221_1397 (size=156) 2024-11-15T08:28:57,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742221_1397 (size=156) 2024-11-15T08:28:57,807 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:28:57,807 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ce1ae584917574af269eaf0008327bd5}, {pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 96df807fbed519d5640d78f1262cd1e8}] 2024-11-15T08:28:57,813 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=185, ppid=183, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure 96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:28:57,813 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ce1ae584917574af269eaf0008327bd5 2024-11-15T08:28:57,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-15T08:28:57,966 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34555 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=185 2024-11-15T08:28:57,966 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. 2024-11-15T08:28:57,966 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(2902): Flushing 96df807fbed519d5640d78f1262cd1e8 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-15T08:28:57,966 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=184 2024-11-15T08:28:57,967 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. 2024-11-15T08:28:57,967 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(2902): Flushing ce1ae584917574af269eaf0008327bd5 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-15T08:28:58,004 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/.tmp/cf/f7f2e88e47554521b36e173ab2d75a99 is 71, key is 15feaa22e1726d413df5c621abca4356/cf:q/1731659337741/Put/seqid=0 2024-11-15T08:28:58,006 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5/.tmp/cf/e96110369ea9455aa1275197e22a77cb is 71, key is 00f242759e6c459b39fcea7efadb862e/cf:q/1731659337738/Put/seqid=0 2024-11-15T08:28:58,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742222_1398 (size=8256) 2024-11-15T08:28:58,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742222_1398 (size=8256) 2024-11-15T08:28:58,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742222_1398 (size=8256) 2024-11-15T08:28:58,033 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/.tmp/cf/f7f2e88e47554521b36e173ab2d75a99 2024-11-15T08:28:58,049 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/.tmp/cf/f7f2e88e47554521b36e173ab2d75a99 as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/cf/f7f2e88e47554521b36e173ab2d75a99 2024-11-15T08:28:58,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742223_1399 (size=5354) 2024-11-15T08:28:58,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742223_1399 (size=5354) 2024-11-15T08:28:58,057 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/cf/f7f2e88e47554521b36e173ab2d75a99, entries=46, sequenceid=6, filesize=8.1 K 2024-11-15T08:28:58,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742223_1399 (size=5354) 2024-11-15T08:28:58,058 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5/.tmp/cf/e96110369ea9455aa1275197e22a77cb 2024-11-15T08:28:58,058 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 96df807fbed519d5640d78f1262cd1e8 in 92ms, sequenceid=6, compaction requested=false 2024-11-15T08:28:58,058 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-11-15T08:28:58,060 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.HRegion(2603): Flush status journal for 96df807fbed519d5640d78f1262cd1e8: 2024-11-15T08:28:58,060 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. for snaptb0-testExportWithChecksum completed. 
2024-11-15T08:28:58,060 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-15T08:28:58,060 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:58,060 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/cf/f7f2e88e47554521b36e173ab2d75a99] hfiles 2024-11-15T08:28:58,060 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/cf/f7f2e88e47554521b36e173ab2d75a99 for snapshot=snaptb0-testExportWithChecksum 2024-11-15T08:28:58,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5/.tmp/cf/e96110369ea9455aa1275197e22a77cb as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5/cf/e96110369ea9455aa1275197e22a77cb 2024-11-15T08:28:58,070 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5/cf/e96110369ea9455aa1275197e22a77cb, entries=4, sequenceid=6, filesize=5.2 K 2024-11-15T08:28:58,071 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for ce1ae584917574af269eaf0008327bd5 in 104ms, sequenceid=6, compaction requested=false 2024-11-15T08:28:58,071 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.HRegion(2603): Flush status journal for ce1ae584917574af269eaf0008327bd5: 2024-11-15T08:28:58,071 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. for snaptb0-testExportWithChecksum completed. 2024-11-15T08:28:58,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-15T08:28:58,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:28:58,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5/cf/e96110369ea9455aa1275197e22a77cb] hfiles 2024-11-15T08:28:58,072 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5/cf/e96110369ea9455aa1275197e22a77cb for snapshot=snaptb0-testExportWithChecksum 2024-11-15T08:28:58,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742224_1400 (size=107) 2024-11-15T08:28:58,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742224_1400 (size=107) 2024-11-15T08:28:58,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742224_1400 (size=107) 2024-11-15T08:28:58,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. 
2024-11-15T08:28:58,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=185}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=185 2024-11-15T08:28:58,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=185 2024-11-15T08:28:58,087 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:28:58,088 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=185, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:28:58,090 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, ppid=183, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 96df807fbed519d5640d78f1262cd1e8 in 282 msec 2024-11-15T08:28:58,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742225_1401 (size=107) 2024-11-15T08:28:58,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742225_1401 (size=107) 2024-11-15T08:28:58,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742225_1401 (size=107) 2024-11-15T08:28:58,098 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. 
2024-11-15T08:28:58,098 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=184 2024-11-15T08:28:58,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=184 2024-11-15T08:28:58,099 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region ce1ae584917574af269eaf0008327bd5 2024-11-15T08:28:58,099 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=184, ppid=183, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ce1ae584917574af269eaf0008327bd5 2024-11-15T08:28:58,102 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=183 2024-11-15T08:28:58,102 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=183, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ce1ae584917574af269eaf0008327bd5 in 293 msec 2024-11-15T08:28:58,102 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:28:58,103 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:28:58,104 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:28:58,104 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-11-15T08:28:58,105 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-15T08:28:58,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-15T08:28:58,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742226_1402 (size=621) 2024-11-15T08:28:58,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742226_1402 (size=621) 2024-11-15T08:28:58,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742226_1402 (size=621) 2024-11-15T08:28:58,126 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ 
ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:28:58,131 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:28:58,131 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-15T08:28:58,132 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=183, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:28:58,132 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 183 2024-11-15T08:28:58,133 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=183, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 342 msec 2024-11-15T08:28:58,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-15T08:28:58,416 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-15T08:28:58,417 INFO [Time-limited test {}] snapshot.TestExportSnapshot(475): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659338417 2024-11-15T08:28:58,417 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659338417, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659338417, srcFsUri=hdfs://localhost:44023, srcDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:58,446 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44023, inputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:28:58,446 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=org.apache.hadoop.fs.LocalFileSystem@491c065d, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659338417, skipTmp=false, 
initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659338417/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-15T08:28:58,447 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-15T08:28:58,451 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659338417/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-15T08:28:58,474 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:58,474 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:58,475 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:59,520 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-2960384224418779429.jar 2024-11-15T08:28:59,521 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:59,521 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:59,586 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-11361016189879808990.jar 2024-11-15T08:28:59,587 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:59,587 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:59,587 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): 
For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:59,587 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:59,588 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:59,588 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:28:59,588 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T08:28:59,588 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T08:28:59,588 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T08:28:59,589 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-15T08:28:59,589 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T08:28:59,589 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T08:28:59,589 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T08:28:59,589 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 
2024-11-15T08:28:59,590 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T08:28:59,590 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T08:28:59,590 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T08:28:59,590 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:28:59,591 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:28:59,591 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:28:59,591 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:28:59,591 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:28:59,591 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:28:59,591 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:28:59,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742227_1403 (size=131440) 2024-11-15T08:28:59,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742227_1403 (size=131440) 2024-11-15T08:28:59,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is 
added to blk_1073742227_1403 (size=131440) 2024-11-15T08:28:59,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742228_1404 (size=4188619) 2024-11-15T08:28:59,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742228_1404 (size=4188619) 2024-11-15T08:28:59,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742228_1404 (size=4188619) 2024-11-15T08:28:59,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742229_1405 (size=1323991) 2024-11-15T08:28:59,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742229_1405 (size=1323991) 2024-11-15T08:28:59,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742229_1405 (size=1323991) 2024-11-15T08:28:59,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742230_1406 (size=903738) 2024-11-15T08:28:59,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742230_1406 (size=903738) 2024-11-15T08:28:59,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742230_1406 (size=903738) 2024-11-15T08:28:59,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742231_1407 (size=8360083) 2024-11-15T08:28:59,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742231_1407 (size=8360083) 2024-11-15T08:28:59,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742231_1407 (size=8360083) 2024-11-15T08:28:59,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742232_1408 (size=1877034) 2024-11-15T08:28:59,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742232_1408 (size=1877034) 2024-11-15T08:28:59,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742232_1408 (size=1877034) 2024-11-15T08:28:59,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742233_1409 (size=77835) 2024-11-15T08:28:59,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742233_1409 (size=77835) 2024-11-15T08:28:59,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742233_1409 (size=77835) 2024-11-15T08:28:59,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742234_1410 (size=30949) 2024-11-15T08:28:59,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:32865 is added to blk_1073742234_1410 (size=30949) 2024-11-15T08:28:59,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742234_1410 (size=30949) 2024-11-15T08:28:59,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742235_1411 (size=1597327) 2024-11-15T08:28:59,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742235_1411 (size=1597327) 2024-11-15T08:28:59,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742235_1411 (size=1597327) 2024-11-15T08:28:59,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742236_1412 (size=6424751) 2024-11-15T08:28:59,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742236_1412 (size=6424751) 2024-11-15T08:28:59,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742236_1412 (size=6424751) 2024-11-15T08:28:59,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742237_1413 (size=4695811) 2024-11-15T08:28:59,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742237_1413 (size=4695811) 2024-11-15T08:28:59,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742237_1413 (size=4695811) 2024-11-15T08:28:59,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742238_1414 (size=232957) 2024-11-15T08:28:59,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742238_1414 (size=232957) 2024-11-15T08:28:59,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742238_1414 (size=232957) 2024-11-15T08:28:59,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742239_1415 (size=127628) 2024-11-15T08:28:59,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742239_1415 (size=127628) 2024-11-15T08:28:59,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742239_1415 (size=127628) 2024-11-15T08:28:59,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742240_1416 (size=20406) 2024-11-15T08:28:59,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742240_1416 (size=20406) 2024-11-15T08:28:59,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742240_1416 (size=20406) 2024-11-15T08:28:59,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:38947 is added to blk_1073742241_1417 (size=440656) 2024-11-15T08:28:59,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742241_1417 (size=440656) 2024-11-15T08:28:59,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742241_1417 (size=440656) 2024-11-15T08:28:59,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742242_1418 (size=5175431) 2024-11-15T08:28:59,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742242_1418 (size=5175431) 2024-11-15T08:28:59,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742242_1418 (size=5175431) 2024-11-15T08:28:59,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742243_1419 (size=217634) 2024-11-15T08:28:59,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742243_1419 (size=217634) 2024-11-15T08:28:59,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742243_1419 (size=217634) 2024-11-15T08:28:59,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742244_1420 (size=1832290) 2024-11-15T08:28:59,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742244_1420 (size=1832290) 2024-11-15T08:28:59,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742244_1420 (size=1832290) 2024-11-15T08:28:59,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742245_1421 (size=322274) 2024-11-15T08:28:59,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742245_1421 (size=322274) 2024-11-15T08:28:59,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742245_1421 (size=322274) 2024-11-15T08:28:59,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742246_1422 (size=503880) 2024-11-15T08:28:59,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742246_1422 (size=503880) 2024-11-15T08:28:59,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742246_1422 (size=503880) 2024-11-15T08:28:59,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742247_1423 (size=29229) 2024-11-15T08:28:59,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742247_1423 (size=29229) 2024-11-15T08:28:59,947 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742247_1423 (size=29229) 2024-11-15T08:28:59,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742248_1424 (size=24096) 2024-11-15T08:28:59,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742248_1424 (size=24096) 2024-11-15T08:28:59,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742248_1424 (size=24096) 2024-11-15T08:28:59,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742249_1425 (size=111872) 2024-11-15T08:28:59,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742249_1425 (size=111872) 2024-11-15T08:28:59,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742249_1425 (size=111872) 2024-11-15T08:28:59,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742250_1426 (size=45609) 2024-11-15T08:28:59,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742250_1426 (size=45609) 2024-11-15T08:28:59,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742250_1426 (size=45609) 2024-11-15T08:28:59,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742251_1427 (size=136454) 2024-11-15T08:28:59,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742251_1427 (size=136454) 2024-11-15T08:28:59,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742251_1427 (size=136454) 2024-11-15T08:28:59,995 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
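[editor's note] The DEBUG lines above show TableMapReduceUtil resolving, for each class the export job needs at runtime, the jar that contains it so those jars can be shipped with the MapReduce job; the JobResourceUploader warning that follows concerns the job jar itself, which this test does not set. Below is a minimal sketch of that kind of class-to-jar lookup using only plain JDK APIs. The class FindContainingJar and its output format are illustrative assumptions, not HBase's actual implementation.

import java.security.CodeSource;

/**
 * Minimal sketch of the class-to-jar lookup reflected by the
 * "For class X, using jar Y" DEBUG lines above. This is NOT
 * TableMapReduceUtil's implementation; it only illustrates the
 * idea with plain JDK APIs.
 */
public class FindContainingJar {
  /** Code-source location of a class, or null for bootstrap/JDK classes. */
  static String containingJar(Class<?> clazz) {
    CodeSource src = clazz.getProtectionDomain().getCodeSource();
    return (src == null || src.getLocation() == null) ? null : src.getLocation().toString();
  }

  public static void main(String[] args) throws Exception {
    // Pass fully-qualified class names that are on the current classpath,
    // e.g. org.apache.hadoop.hbase.HConstants when the HBase jars are present.
    for (String name : args) {
      System.out.println("For class " + name + ", using jar " + containingJar(Class.forName(name)));
    }
  }
}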
2024-11-15T08:28:59,997 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-15T08:28:59,999 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-15T08:29:00,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742252_1428 (size=338) 2024-11-15T08:29:00,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742252_1428 (size=338) 2024-11-15T08:29:00,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742252_1428 (size=338) 2024-11-15T08:29:00,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742253_1429 (size=15) 2024-11-15T08:29:00,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742253_1429 (size=15) 2024-11-15T08:29:00,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742253_1429 (size=15) 2024-11-15T08:29:00,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742254_1430 (size=303778) 2024-11-15T08:29:00,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742254_1430 (size=303778) 2024-11-15T08:29:00,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742254_1430 (size=303778) 2024-11-15T08:29:01,064 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T08:29:01,064 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T08:29:01,072 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0007_000001 (auth:SIMPLE) from 127.0.0.1:50160 2024-11-15T08:29:01,083 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_3/usercache/jenkins/appcache/application_1731659181416_0007/container_1731659181416_0007_01_000001/launch_container.sh] 2024-11-15T08:29:01,083 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_3/usercache/jenkins/appcache/application_1731659181416_0007/container_1731659181416_0007_01_000001/container_tokens] 2024-11-15T08:29:01,083 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_3/usercache/jenkins/appcache/application_1731659181416_0007/container_1731659181416_0007_01_000001/sysfs] 2024-11-15T08:29:01,893 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0008_000001 (auth:SIMPLE) from 127.0.0.1:44328 2024-11-15T08:29:01,942 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T08:29:04,710 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-15T08:29:04,711 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-11-15T08:29:04,711 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-15T08:29:08,157 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0008_000001 (auth:SIMPLE) from 127.0.0.1:60102 2024-11-15T08:29:08,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742255_1431 (size=349428) 2024-11-15T08:29:08,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742255_1431 (size=349428) 2024-11-15T08:29:08,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742255_1431 (size=349428) 2024-11-15T08:29:10,214 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T08:29:10,439 INFO [Socket Reader #1 for port 0 {}] 
ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0008_000001 (auth:SIMPLE) from 127.0.0.1:44332 2024-11-15T08:29:13,477 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T08:29:14,762 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0008/container_1731659181416_0008_01_000002/launch_container.sh] 2024-11-15T08:29:14,762 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0008/container_1731659181416_0008_01_000002/container_tokens] 2024-11-15T08:29:14,762 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0008/container_1731659181416_0008_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/cf/f7f2e88e47554521b36e173ab2d75a99 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659338417/archive/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/cf/f7f2e88e47554521b36e173ab2d75a99. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-15T08:29:16,283 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0008_000001 (auth:SIMPLE) from 127.0.0.1:53138 2024-11-15T08:29:17,658 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 4746d1a25f8d5d89e0a76786d0716410, had cached 0 bytes from a total of 5286 2024-11-15T08:29:17,658 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1f91fa0aee2cc478532b519b97469773, had cached 0 bytes from a total of 8324 2024-11-15T08:29:17,905 DEBUG [master/48e8ad8dc936:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1f91fa0aee2cc478532b519b97469773 changed from -1.0 to 0.0, refreshing cache 2024-11-15T08:29:17,905 DEBUG [master/48e8ad8dc936:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 96df807fbed519d5640d78f1262cd1e8 changed from -1.0 to 0.0, refreshing cache 2024-11-15T08:29:17,905 DEBUG [master/48e8ad8dc936:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 4746d1a25f8d5d89e0a76786d0716410 changed from -1.0 to 0.0, refreshing cache 2024-11-15T08:29:17,905 DEBUG [master/48e8ad8dc936:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ce1ae584917574af269eaf0008327bd5 changed from -1.0 to 0.0, refreshing cache 2024-11-15T08:29:21,157 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0008/container_1731659181416_0008_01_000003/launch_container.sh] 2024-11-15T08:29:21,157 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0008/container_1731659181416_0008_01_000003/container_tokens] 2024-11-15T08:29:21,157 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_3/usercache/jenkins/appcache/application_1731659181416_0008/container_1731659181416_0008_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/cf/f7f2e88e47554521b36e173ab2d75a99 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659338417/archive/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/cf/f7f2e88e47554521b36e173ab2d75a99. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-15T08:29:22,304 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0008_000001 (auth:SIMPLE) from 127.0.0.1:55562 2024-11-15T08:29:27,204 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_1/usercache/jenkins/appcache/application_1731659181416_0008/container_1731659181416_0008_01_000004/launch_container.sh] 2024-11-15T08:29:27,204 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_1/usercache/jenkins/appcache/application_1731659181416_0008/container_1731659181416_0008_01_000004/container_tokens] 2024-11-15T08:29:27,204 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_1/usercache/jenkins/appcache/application_1731659181416_0008/container_1731659181416_0008_01_000004/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/cf/f7f2e88e47554521b36e173ab2d75a99 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/local-export-1731659338417/archive/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/cf/f7f2e88e47554521b36e173ab2d75a99. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:599) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:335) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:257) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:181) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-15T08:29:28,337 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0008_000001 (auth:SIMPLE) from 127.0.0.1:46408 2024-11-15T08:29:32,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742256_1432 (size=21330) 2024-11-15T08:29:32,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742256_1432 (size=21330) 2024-11-15T08:29:32,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742256_1432 (size=21330) 2024-11-15T08:29:32,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742257_1433 (size=460) 2024-11-15T08:29:32,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742257_1433 (size=460) 2024-11-15T08:29:32,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742257_1433 (size=460) 2024-11-15T08:29:32,630 WARN [ContainersLauncher #1 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_2/usercache/jenkins/appcache/application_1731659181416_0008/container_1731659181416_0008_01_000005/launch_container.sh] 2024-11-15T08:29:32,630 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_2/usercache/jenkins/appcache/application_1731659181416_0008/container_1731659181416_0008_01_000005/container_tokens] 2024-11-15T08:29:32,630 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_2/usercache/jenkins/appcache/application_1731659181416_0008/container_1731659181416_0008_01_000005/sysfs] 2024-11-15T08:29:32,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742258_1434 (size=21330) 2024-11-15T08:29:32,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742258_1434 (size=21330) 2024-11-15T08:29:32,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742258_1434 (size=21330) 2024-11-15T08:29:32,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742259_1435 (size=349428) 2024-11-15T08:29:32,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742259_1435 (size=349428) 2024-11-15T08:29:32,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742259_1435 (size=349428) 2024-11-15T08:29:32,714 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0008_000001 (auth:SIMPLE) from 127.0.0.1:51920 2024-11-15T08:29:34,413 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1230): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1731659181416_0008_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:938) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1207) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:522) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:352) ~[test-classes/:?] 
at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:237) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
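[editor's note] All three failed map attempts above hit the same IOException in ExportSnapshot$ExportMapper.verifyCopyResult: the source file lives on hdfs:// and the copy on file://, so the two filesystems report checksums computed differently and the comparison cannot succeed. Below is a hedged sketch of that kind of cross-filesystem checksum comparison, including the dfs.checksum.combine.mode=COMPOSITE_CRC setting the error message suggests; the class ChecksumCompareSketch, the shortened paths, and the null handling are illustrative assumptions, not the code of verifyCopyResult.

import java.util.Objects;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Sketch of the cross-filesystem checksum comparison that fails above.
 * Paths are shortened placeholders taken from the log; this is not
 * ExportSnapshot's verifyCopyResult, only an illustration of the idea.
 */
public class ChecksumCompareSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The error message suggests file-level CRCs that stay comparable across
    // block sizes and filesystem types (assumption: key spelled as in the message).
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");

    Path src = new Path("hdfs://localhost:44023/user/jenkins/test-data/.../cf/f7f2e88e47554521b36e173ab2d75a99");
    Path dst = new Path("file:///home/jenkins/.../archive/data/default/.../cf/f7f2e88e47554521b36e173ab2d75a99");

    FileChecksum srcSum = FileSystem.get(src.toUri(), conf).getFileChecksum(src);
    FileChecksum dstSum = FileSystem.get(dst.toUri(), conf).getFileChecksum(dst);

    // HDFS returns an MD5-of-CRC style checksum; the local filesystem may return
    // null or a checksum with a different algorithm, which is why the verification
    // in the log throws instead of matching.
    if (srcSum == null || dstSum == null || !Objects.equals(srcSum, dstSum)) {
      System.out.println("Checksum mismatch (or not comparable): "
          + (srcSum == null ? "null" : srcSum.getAlgorithmName()) + " vs "
          + (dstSum == null ? "null" : dstSum.getAlgorithmName()));
    } else {
      System.out.println("Checksums match: " + srcSum.getAlgorithmName());
    }
  }
}

The retry that follows (export-1731659374414) targets hdfs://localhost:44023 for both input and output, which avoids this particular source/destination checksum-algorithm mismatch.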
2024-11-15T08:29:34,415 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659374414 2024-11-15T08:29:34,415 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:44023, tgtDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659374414, rawTgtDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659374414, srcFsUri=hdfs://localhost:44023, srcDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:29:34,452 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44023, inputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:29:34,452 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659374414, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659374414/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-15T08:29:34,454 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-15T08:29:34,459 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659374414/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-15T08:29:34,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742260_1436 (size=621) 2024-11-15T08:29:34,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742260_1436 (size=621) 2024-11-15T08:29:34,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742260_1436 (size=621) 2024-11-15T08:29:34,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742261_1437 (size=156) 2024-11-15T08:29:34,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742261_1437 (size=156) 2024-11-15T08:29:34,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742261_1437 (size=156) 2024-11-15T08:29:34,475 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:34,475 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:34,475 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:35,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-13706298376495499379.jar 2024-11-15T08:29:35,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:35,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:35,648 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-977361482331318112.jar 2024-11-15T08:29:35,648 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:35,649 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:35,649 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:35,649 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:35,649 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:35,649 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:35,650 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T08:29:35,650 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T08:29:35,650 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T08:29:35,650 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-15T08:29:35,650 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T08:29:35,651 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T08:29:35,651 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T08:29:35,651 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-15T08:29:35,651 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T08:29:35,651 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T08:29:35,651 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T08:29:35,652 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-15T08:29:35,652 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:29:35,652 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:29:35,652 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:29:35,653 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:29:35,653 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:29:35,653 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:29:35,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742262_1438 (size=131440) 2024-11-15T08:29:35,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742262_1438 (size=131440) 2024-11-15T08:29:35,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742262_1438 (size=131440) 2024-11-15T08:29:35,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742263_1439 (size=4188619) 2024-11-15T08:29:35,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742263_1439 (size=4188619) 2024-11-15T08:29:35,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742263_1439 (size=4188619) 2024-11-15T08:29:35,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742264_1440 (size=1323991) 2024-11-15T08:29:35,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742264_1440 (size=1323991) 2024-11-15T08:29:35,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742264_1440 (size=1323991) 2024-11-15T08:29:35,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:37633 is added to blk_1073742265_1441 (size=903738) 2024-11-15T08:29:35,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742265_1441 (size=903738) 2024-11-15T08:29:35,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742265_1441 (size=903738) 2024-11-15T08:29:35,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742266_1442 (size=8360083) 2024-11-15T08:29:35,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742266_1442 (size=8360083) 2024-11-15T08:29:35,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742266_1442 (size=8360083) 2024-11-15T08:29:36,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742267_1443 (size=1877034) 2024-11-15T08:29:36,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742267_1443 (size=1877034) 2024-11-15T08:29:36,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742267_1443 (size=1877034) 2024-11-15T08:29:36,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742268_1444 (size=77835) 2024-11-15T08:29:36,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742268_1444 (size=77835) 2024-11-15T08:29:36,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742268_1444 (size=77835) 2024-11-15T08:29:36,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742269_1445 (size=30949) 2024-11-15T08:29:36,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742269_1445 (size=30949) 2024-11-15T08:29:36,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742269_1445 (size=30949) 2024-11-15T08:29:36,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742270_1446 (size=1597327) 2024-11-15T08:29:36,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742270_1446 (size=1597327) 2024-11-15T08:29:36,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742270_1446 (size=1597327) 2024-11-15T08:29:36,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742271_1447 (size=4695811) 2024-11-15T08:29:36,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742271_1447 (size=4695811) 2024-11-15T08:29:36,152 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742271_1447 (size=4695811) 2024-11-15T08:29:36,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742272_1448 (size=232957) 2024-11-15T08:29:36,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742272_1448 (size=232957) 2024-11-15T08:29:36,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742272_1448 (size=232957) 2024-11-15T08:29:36,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742273_1449 (size=127628) 2024-11-15T08:29:36,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742273_1449 (size=127628) 2024-11-15T08:29:36,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742273_1449 (size=127628) 2024-11-15T08:29:36,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742274_1450 (size=440656) 2024-11-15T08:29:36,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742274_1450 (size=440656) 2024-11-15T08:29:36,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742274_1450 (size=440656) 2024-11-15T08:29:36,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742275_1451 (size=20406) 2024-11-15T08:29:36,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742275_1451 (size=20406) 2024-11-15T08:29:36,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742275_1451 (size=20406) 2024-11-15T08:29:36,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742276_1452 (size=6424751) 2024-11-15T08:29:36,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742276_1452 (size=6424751) 2024-11-15T08:29:36,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742276_1452 (size=6424751) 2024-11-15T08:29:36,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742277_1453 (size=5175431) 2024-11-15T08:29:36,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742277_1453 (size=5175431) 2024-11-15T08:29:36,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742277_1453 (size=5175431) 2024-11-15T08:29:36,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742278_1454 (size=217634) 2024-11-15T08:29:36,467 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742278_1454 (size=217634) 2024-11-15T08:29:36,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742278_1454 (size=217634) 2024-11-15T08:29:36,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742279_1455 (size=1832290) 2024-11-15T08:29:36,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742279_1455 (size=1832290) 2024-11-15T08:29:36,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742279_1455 (size=1832290) 2024-11-15T08:29:36,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742280_1456 (size=322274) 2024-11-15T08:29:36,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742280_1456 (size=322274) 2024-11-15T08:29:36,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742280_1456 (size=322274) 2024-11-15T08:29:36,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742281_1457 (size=503880) 2024-11-15T08:29:36,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742281_1457 (size=503880) 2024-11-15T08:29:36,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742281_1457 (size=503880) 2024-11-15T08:29:36,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742282_1458 (size=29229) 2024-11-15T08:29:36,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742282_1458 (size=29229) 2024-11-15T08:29:36,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742282_1458 (size=29229) 2024-11-15T08:29:36,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742283_1459 (size=24096) 2024-11-15T08:29:36,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742283_1459 (size=24096) 2024-11-15T08:29:36,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742283_1459 (size=24096) 2024-11-15T08:29:36,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742284_1460 (size=111872) 2024-11-15T08:29:36,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742284_1460 (size=111872) 2024-11-15T08:29:36,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742284_1460 (size=111872) 2024-11-15T08:29:36,584 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742285_1461 (size=45609) 2024-11-15T08:29:36,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742285_1461 (size=45609) 2024-11-15T08:29:36,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742285_1461 (size=45609) 2024-11-15T08:29:36,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742286_1462 (size=136454) 2024-11-15T08:29:36,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742286_1462 (size=136454) 2024-11-15T08:29:36,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742286_1462 (size=136454) 2024-11-15T08:29:36,606 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-15T08:29:36,609 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-15T08:29:36,611 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-15T08:29:36,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742287_1463 (size=338) 2024-11-15T08:29:36,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742287_1463 (size=338) 2024-11-15T08:29:36,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742287_1463 (size=338) 2024-11-15T08:29:36,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742288_1464 (size=15) 2024-11-15T08:29:36,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742288_1464 (size=15) 2024-11-15T08:29:36,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742288_1464 (size=15) 2024-11-15T08:29:36,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742289_1465 (size=303728) 2024-11-15T08:29:36,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742289_1465 (size=303728) 2024-11-15T08:29:36,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742289_1465 (size=303728) 2024-11-15T08:29:38,787 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T08:29:38,788 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T08:29:38,791 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0008_000001 (auth:SIMPLE) from 127.0.0.1:51926 2024-11-15T08:29:38,801 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_0/usercache/jenkins/appcache/application_1731659181416_0008/container_1731659181416_0008_01_000001/launch_container.sh] 2024-11-15T08:29:38,801 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_0/usercache/jenkins/appcache/application_1731659181416_0008/container_1731659181416_0008_01_000001/container_tokens] 2024-11-15T08:29:38,801 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-1_0/usercache/jenkins/appcache/application_1731659181416_0008/container_1731659181416_0008_01_000001/sysfs] 2024-11-15T08:29:39,194 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0009_000001 (auth:SIMPLE) from 127.0.0.1:52674 2024-11-15T08:29:42,124 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region ce1ae584917574af269eaf0008327bd5, had cached 0 bytes from a total of 5354 2024-11-15T08:29:42,125 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 96df807fbed519d5640d78f1262cd1e8, had cached 0 bytes from a total of 8256 2024-11-15T08:29:43,478 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
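For context, a minimal sketch (not taken from this run; the driver class and job name are placeholders) of the driver-side call that produces the per-class "For class X, using jar Y" resolution logged by TableMapReduceUtil(972) above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-sketch");   // job name is a placeholder
        job.setJarByClass(DependencyJarsSketch.class);
        // For each of the job's key/value, input/output format and partitioner classes,
        // locate the jar that contains it and ship that jar via the distributed cache;
        // this is the per-class "using jar ..." resolution recorded in the log above.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }

In this run those classes resolve to hadoop-common-3.4.1.jar and hadoop-mapreduce-client-core-3.4.1.jar from the workspace's local Maven repository.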
2024-11-15T08:29:44,832 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0009_000001 (auth:SIMPLE) from 127.0.0.1:39616 2024-11-15T08:29:45,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742290_1466 (size=349378) 2024-11-15T08:29:45,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742290_1466 (size=349378) 2024-11-15T08:29:45,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742290_1466 (size=349378) 2024-11-15T08:29:47,216 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0009_000001 (auth:SIMPLE) from 127.0.0.1:60920 2024-11-15T08:29:51,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742291_1467 (size=8256) 2024-11-15T08:29:51,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742291_1467 (size=8256) 2024-11-15T08:29:51,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742291_1467 (size=8256) 2024-11-15T08:29:51,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742292_1468 (size=5354) 2024-11-15T08:29:51,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742292_1468 (size=5354) 2024-11-15T08:29:51,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742292_1468 (size=5354) 2024-11-15T08:29:51,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742293_1469 (size=17413) 2024-11-15T08:29:51,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742293_1469 (size=17413) 2024-11-15T08:29:51,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742293_1469 (size=17413) 2024-11-15T08:29:51,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742294_1470 (size=462) 2024-11-15T08:29:51,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742294_1470 (size=462) 2024-11-15T08:29:51,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742294_1470 (size=462) 2024-11-15T08:29:52,004 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_2/usercache/jenkins/appcache/application_1731659181416_0009/container_1731659181416_0009_01_000002/launch_container.sh] 2024-11-15T08:29:52,004 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned 
false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_2/usercache/jenkins/appcache/application_1731659181416_0009/container_1731659181416_0009_01_000002/container_tokens] 2024-11-15T08:29:52,004 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_2/usercache/jenkins/appcache/application_1731659181416_0009/container_1731659181416_0009_01_000002/sysfs] 2024-11-15T08:29:52,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742295_1471 (size=17413) 2024-11-15T08:29:52,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742295_1471 (size=17413) 2024-11-15T08:29:52,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742295_1471 (size=17413) 2024-11-15T08:29:52,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742296_1472 (size=349378) 2024-11-15T08:29:52,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742296_1472 (size=349378) 2024-11-15T08:29:52,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742296_1472 (size=349378) 2024-11-15T08:29:52,057 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0009_000001 (auth:SIMPLE) from 127.0.0.1:41656 2024-11-15T08:29:53,849 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-15T08:29:53,849 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 
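The surrounding entries trace an ExportSnapshot MapReduce job from "Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list" through "Finalize the Snapshot Export" and verification. As a rough sketch of how such a run is typically launched (an assumed invocation, not the test's own code; the destination URI is a placeholder):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Copies the snapshot manifest and referenced hfiles to the target filesystem,
        // then verifies the exported copy -- the phases logged by snapshot.ExportSnapshot here.
        int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
            new String[] {
                "--snapshot", "snaptb0-testExportWithChecksum",
                "--copy-to", "hdfs://namenode:8020/export-target"   // placeholder URI
            });
        System.exit(rc);
      }
    }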
2024-11-15T08:29:53,873 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportWithChecksum 2024-11-15T08:29:53,874 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-15T08:29:53,874 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-15T08:29:53,874 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-15T08:29:53,875 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-15T08:29:53,875 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-15T08:29:53,875 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659374414/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659374414/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-15T08:29:53,875 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659374414/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-15T08:29:53,875 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659374414/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-15T08:29:53,883 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-11-15T08:29:53,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=186, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-11-15T08:29:53,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-15T08:29:53,888 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659393888"}]},"ts":"1731659393888"} 2024-11-15T08:29:53,890 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-11-15T08:29:53,890 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-11-15T08:29:53,891 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=187, ppid=186, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-11-15T08:29:53,892 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ce1ae584917574af269eaf0008327bd5, UNASSIGN}, {pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=96df807fbed519d5640d78f1262cd1e8, UNASSIGN}] 2024-11-15T08:29:53,893 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=96df807fbed519d5640d78f1262cd1e8, UNASSIGN 2024-11-15T08:29:53,894 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ce1ae584917574af269eaf0008327bd5, UNASSIGN 2024-11-15T08:29:53,894 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=188 updating hbase:meta row=ce1ae584917574af269eaf0008327bd5, regionState=CLOSING, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:29:53,894 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=189 updating hbase:meta row=96df807fbed519d5640d78f1262cd1e8, regionState=CLOSING, regionLocation=48e8ad8dc936,34555,1731659175344 2024-11-15T08:29:53,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=188, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ce1ae584917574af269eaf0008327bd5, UNASSIGN because future has completed 2024-11-15T08:29:53,897 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:29:53,897 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=188, state=RUNNABLE, hasLock=false; CloseRegionProcedure ce1ae584917574af269eaf0008327bd5, server=48e8ad8dc936,37511,1731659175104}] 2024-11-15T08:29:53,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=189, ppid=187, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=96df807fbed519d5640d78f1262cd1e8, UNASSIGN because future has completed 2024-11-15T08:29:53,899 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:29:53,899 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=191, ppid=189, state=RUNNABLE, hasLock=false; CloseRegionProcedure 96df807fbed519d5640d78f1262cd1e8, server=48e8ad8dc936,34555,1731659175344}] 2024-11-15T08:29:53,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-15T08:29:54,050 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] 
handler.UnassignRegionHandler(122): Close ce1ae584917574af269eaf0008327bd5 2024-11-15T08:29:54,050 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:29:54,051 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1722): Closing ce1ae584917574af269eaf0008327bd5, disabling compactions & flushes 2024-11-15T08:29:54,051 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. 2024-11-15T08:29:54,051 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. 2024-11-15T08:29:54,051 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. after waiting 0 ms 2024-11-15T08:29:54,051 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. 2024-11-15T08:29:54,052 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(122): Close 96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:29:54,052 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:29:54,052 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1722): Closing 96df807fbed519d5640d78f1262cd1e8, disabling compactions & flushes 2024-11-15T08:29:54,052 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. 2024-11-15T08:29:54,052 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. 2024-11-15T08:29:54,052 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. after waiting 0 ms 2024-11-15T08:29:54,052 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. 
2024-11-15T08:29:54,055 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T08:29:54,056 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T08:29:54,056 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:29:54,056 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5. 2024-11-15T08:29:54,056 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] regionserver.HRegion(1676): Region close journal for ce1ae584917574af269eaf0008327bd5: Waiting for close lock at 1731659394050Running coprocessor pre-close hooks at 1731659394050Disabling compacts and flushes for region at 1731659394050Disabling writes for close at 1731659394051 (+1 ms)Writing region close event to WAL at 1731659394052 (+1 ms)Running coprocessor post-close hooks at 1731659394056 (+4 ms)Closed at 1731659394056 2024-11-15T08:29:54,056 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:29:54,056 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8. 
2024-11-15T08:29:54,056 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] regionserver.HRegion(1676): Region close journal for 96df807fbed519d5640d78f1262cd1e8: Waiting for close lock at 1731659394052Running coprocessor pre-close hooks at 1731659394052Disabling compacts and flushes for region at 1731659394052Disabling writes for close at 1731659394052Writing region close event to WAL at 1731659394053 (+1 ms)Running coprocessor post-close hooks at 1731659394056 (+3 ms)Closed at 1731659394056 2024-11-15T08:29:54,058 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=190}] handler.UnassignRegionHandler(157): Closed ce1ae584917574af269eaf0008327bd5 2024-11-15T08:29:54,059 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=188 updating hbase:meta row=ce1ae584917574af269eaf0008327bd5, regionState=CLOSED 2024-11-15T08:29:54,059 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=191}] handler.UnassignRegionHandler(157): Closed 96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:29:54,059 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=189 updating hbase:meta row=96df807fbed519d5640d78f1262cd1e8, regionState=CLOSED 2024-11-15T08:29:54,061 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=188, state=RUNNABLE, hasLock=false; CloseRegionProcedure ce1ae584917574af269eaf0008327bd5, server=48e8ad8dc936,37511,1731659175104 because future has completed 2024-11-15T08:29:54,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE, hasLock=false; CloseRegionProcedure 96df807fbed519d5640d78f1262cd1e8, server=48e8ad8dc936,34555,1731659175344 because future has completed 2024-11-15T08:29:54,064 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=190, resume processing ppid=188 2024-11-15T08:29:54,064 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=188, state=SUCCESS, hasLock=false; CloseRegionProcedure ce1ae584917574af269eaf0008327bd5, server=48e8ad8dc936,37511,1731659175104 in 165 msec 2024-11-15T08:29:54,065 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, ppid=187, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=ce1ae584917574af269eaf0008327bd5, UNASSIGN in 172 msec 2024-11-15T08:29:54,065 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=191, resume processing ppid=189 2024-11-15T08:29:54,065 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; CloseRegionProcedure 96df807fbed519d5640d78f1262cd1e8, server=48e8ad8dc936,34555,1731659175344 in 164 msec 2024-11-15T08:29:54,067 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=187 2024-11-15T08:29:54,067 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=187, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=96df807fbed519d5640d78f1262cd1e8, UNASSIGN in 173 msec 2024-11-15T08:29:54,070 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=187, resume processing ppid=186 2024-11-15T08:29:54,070 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): 
Finished pid=187, ppid=186, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 177 msec 2024-11-15T08:29:54,072 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659394072"}]},"ts":"1731659394072"} 2024-11-15T08:29:54,073 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-11-15T08:29:54,074 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-11-15T08:29:54,075 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 191 msec 2024-11-15T08:29:54,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=186 2024-11-15T08:29:54,206 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-15T08:29:54,206 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-11-15T08:29:54,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=192, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-15T08:29:54,209 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=192, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-15T08:29:54,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-11-15T08:29:54,210 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=192, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-15T08:29:54,213 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-11-15T08:29:54,214 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5 2024-11-15T08:29:54,214 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:29:54,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-15T08:29:54,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, 
state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-15T08:29:54,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-15T08:29:54,217 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-15T08:29:54,217 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/recovered.edits] 2024-11-15T08:29:54,217 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5/recovered.edits] 2024-11-15T08:29:54,218 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-15T08:29:54,218 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-15T08:29:54,218 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-15T08:29:54,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-15T08:29:54,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:29:54,219 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-15T08:29:54,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:29:54,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:29:54,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-15T08:29:54,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:29:54,221 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data null 2024-11-15T08:29:54,221 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-15T08:29:54,222 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:29:54,223 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:29:54,223 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:29:54,223 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:29:54,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=192 2024-11-15T08:29:54,226 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/cf/f7f2e88e47554521b36e173ab2d75a99 to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/cf/f7f2e88e47554521b36e173ab2d75a99 2024-11-15T08:29:54,232 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5/cf/e96110369ea9455aa1275197e22a77cb to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5/cf/e96110369ea9455aa1275197e22a77cb 2024-11-15T08:29:54,233 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/recovered.edits/9.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8/recovered.edits/9.seqid 2024-11-15T08:29:54,237 DEBUG 
[HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/96df807fbed519d5640d78f1262cd1e8 2024-11-15T08:29:54,239 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5/recovered.edits/9.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5/recovered.edits/9.seqid 2024-11-15T08:29:54,239 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportWithChecksum/ce1ae584917574af269eaf0008327bd5 2024-11-15T08:29:54,240 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-11-15T08:29:54,242 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=192, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-15T08:29:54,245 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-11-15T08:29:54,249 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-11-15T08:29:54,250 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=192, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-15T08:29:54,250 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 2024-11-15T08:29:54,250 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659394250"}]},"ts":"9223372036854775807"} 2024-11-15T08:29:54,250 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659394250"}]},"ts":"9223372036854775807"} 2024-11-15T08:29:54,252 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T08:29:54,252 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ce1ae584917574af269eaf0008327bd5, NAME => 'testtb-testExportWithChecksum,,1731659336722.ce1ae584917574af269eaf0008327bd5.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 96df807fbed519d5640d78f1262cd1e8, NAME => 'testtb-testExportWithChecksum,1,1731659336722.96df807fbed519d5640d78f1262cd1e8.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T08:29:54,252 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 
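The DisableTableProcedure/DeleteTableProcedure chain above, together with the snapshot "delete name: ..." requests that follow, corresponds to a client-side teardown along these lines (a sketch only; connection configuration is assumed):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TeardownSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportWithChecksum");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.disableTable(table);                                    // DisableTableProcedure (pid=186)
          admin.deleteTable(table);                                     // DeleteTableProcedure (pid=192)
          admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum");  // snapshot cleanup
          admin.deleteSnapshot("snaptb0-testExportWithChecksum");
        }
      }
    }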
2024-11-15T08:29:54,252 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731659394252"}]},"ts":"9223372036854775807"} 2024-11-15T08:29:54,254 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-11-15T08:29:54,255 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=192, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-15T08:29:54,257 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 49 msec 2024-11-15T08:29:54,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=192 2024-11-15T08:29:54,326 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-11-15T08:29:54,326 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-15T08:29:54,332 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-11-15T08:29:54,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-11-15T08:29:54,335 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-11-15T08:29:54,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-11-15T08:29:54,374 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=801 (was 805), OpenFileDescriptor=802 (was 817), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=673 (was 433) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 17) - ProcessCount LEAK? 
-, AvailableMemoryMB=1375 (was 2053) 2024-11-15T08:29:54,374 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=801 is superior to 500 2024-11-15T08:29:54,398 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=801, OpenFileDescriptor=802, MaxFileDescriptor=1048576, SystemLoadAverage=673, ProcessCount=20, AvailableMemoryMB=1372 2024-11-15T08:29:54,398 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=801 is superior to 500 2024-11-15T08:29:54,400 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-15T08:29:54,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=193, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:29:54,402 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-11-15T08:29:54,402 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:29:54,402 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 193 2024-11-15T08:29:54,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-15T08:29:54,403 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-15T08:29:54,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742297_1473 (size=418) 2024-11-15T08:29:54,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742297_1473 (size=418) 2024-11-15T08:29:54,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742297_1473 (size=418) 2024-11-15T08:29:54,422 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 0bfb5df214fbf62f39e23b26ca694374, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:29:54,424 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d964013f8c33adbdff757626b30f3756, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:29:54,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742298_1474 (size=79) 2024-11-15T08:29:54,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742298_1474 (size=79) 2024-11-15T08:29:54,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742298_1474 (size=79) 2024-11-15T08:29:54,445 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:29:54,445 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing 0bfb5df214fbf62f39e23b26ca694374, disabling compactions & flushes 2024-11-15T08:29:54,445 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. 2024-11-15T08:29:54,445 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. 2024-11-15T08:29:54,445 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. after waiting 0 ms 2024-11-15T08:29:54,445 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. 
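The CreateTableProcedure above builds 'testtb-testExportFileSystemStateWithSkipTmp' with a single 'cf' family keeping one version, one region replica, and two regions split at row key '1'. A client-side sketch that would request an equivalent table (assumed code, not the test's own helper):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(
              TableDescriptorBuilder.newBuilder(table)
                  .setRegionReplication(1)                              // REGION_REPLICATION => '1'
                  .setColumnFamily(
                      ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                          .setMaxVersions(1)                            // VERSIONS => '1'
                          .build())
                  .build(),
              new byte[][] { Bytes.toBytes("1") });                     // split at '1' -> two regions
        }
      }
    }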
2024-11-15T08:29:54,445 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. 2024-11-15T08:29:54,448 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for 0bfb5df214fbf62f39e23b26ca694374: Waiting for close lock at 1731659394445Disabling compacts and flushes for region at 1731659394445Disabling writes for close at 1731659394445Writing region close event to WAL at 1731659394445Closed at 1731659394445 2024-11-15T08:29:54,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742299_1475 (size=79) 2024-11-15T08:29:54,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742299_1475 (size=79) 2024-11-15T08:29:54,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742299_1475 (size=79) 2024-11-15T08:29:54,465 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:29:54,465 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing d964013f8c33adbdff757626b30f3756, disabling compactions & flushes 2024-11-15T08:29:54,465 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. 2024-11-15T08:29:54,465 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. 2024-11-15T08:29:54,465 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. after waiting 0 ms 2024-11-15T08:29:54,465 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. 2024-11-15T08:29:54,465 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. 
2024-11-15T08:29:54,465 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for d964013f8c33adbdff757626b30f3756: Waiting for close lock at 1731659394465Disabling compacts and flushes for region at 1731659394465Disabling writes for close at 1731659394465Writing region close event to WAL at 1731659394465Closed at 1731659394465 2024-11-15T08:29:54,466 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-11-15T08:29:54,467 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1731659394466"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659394466"}]},"ts":"1731659394466"} 2024-11-15T08:29:54,467 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1731659394466"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731659394466"}]},"ts":"1731659394466"} 2024-11-15T08:29:54,470 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-15T08:29:54,471 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-15T08:29:54,472 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659394471"}]},"ts":"1731659394471"} 2024-11-15T08:29:54,473 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-11-15T08:29:54,474 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {48e8ad8dc936=0} racks are {/default-rack=0} 2024-11-15T08:29:54,475 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-15T08:29:54,475 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-15T08:29:54,475 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-15T08:29:54,475 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-15T08:29:54,475 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-15T08:29:54,475 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-15T08:29:54,475 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-15T08:29:54,475 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-15T08:29:54,475 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-15T08:29:54,475 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-15T08:29:54,476 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=194, 
ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d964013f8c33adbdff757626b30f3756, ASSIGN}, {pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0bfb5df214fbf62f39e23b26ca694374, ASSIGN}] 2024-11-15T08:29:54,477 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0bfb5df214fbf62f39e23b26ca694374, ASSIGN 2024-11-15T08:29:54,478 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d964013f8c33adbdff757626b30f3756, ASSIGN 2024-11-15T08:29:54,479 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d964013f8c33adbdff757626b30f3756, ASSIGN; state=OFFLINE, location=48e8ad8dc936,37511,1731659175104; forceNewPlan=false, retain=false 2024-11-15T08:29:54,480 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0bfb5df214fbf62f39e23b26ca694374, ASSIGN; state=OFFLINE, location=48e8ad8dc936,40637,1731659175269; forceNewPlan=false, retain=false 2024-11-15T08:29:54,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-15T08:29:54,630 INFO [48e8ad8dc936:35169 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-15T08:29:54,630 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=195 updating hbase:meta row=0bfb5df214fbf62f39e23b26ca694374, regionState=OPENING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:29:54,630 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=194 updating hbase:meta row=d964013f8c33adbdff757626b30f3756, regionState=OPENING, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:29:54,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=195, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0bfb5df214fbf62f39e23b26ca694374, ASSIGN because future has completed 2024-11-15T08:29:54,633 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0bfb5df214fbf62f39e23b26ca694374, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:29:54,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=194, ppid=193, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d964013f8c33adbdff757626b30f3756, ASSIGN because future has completed 2024-11-15T08:29:54,634 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=197, ppid=194, state=RUNNABLE, hasLock=false; OpenRegionProcedure d964013f8c33adbdff757626b30f3756, server=48e8ad8dc936,37511,1731659175104}] 2024-11-15T08:29:54,710 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-15T08:29:54,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-15T08:29:54,789 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. 2024-11-15T08:29:54,789 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7752): Opening region: {ENCODED => d964013f8c33adbdff757626b30f3756, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756.', STARTKEY => '', ENDKEY => '1'} 2024-11-15T08:29:54,790 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. service=AccessControlService 2024-11-15T08:29:54,790 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-15T08:29:54,790 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. 
2024-11-15T08:29:54,790 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp d964013f8c33adbdff757626b30f3756 2024-11-15T08:29:54,790 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:29:54,790 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7752): Opening region: {ENCODED => 0bfb5df214fbf62f39e23b26ca694374, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374.', STARTKEY => '1', ENDKEY => ''} 2024-11-15T08:29:54,791 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7794): checking encryption for d964013f8c33adbdff757626b30f3756 2024-11-15T08:29:54,791 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(7797): checking classloading for d964013f8c33adbdff757626b30f3756 2024-11-15T08:29:54,791 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. service=AccessControlService 2024-11-15T08:29:54,791 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-15T08:29:54,791 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:29:54,791 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-15T08:29:54,791 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7794): checking encryption for 0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:29:54,791 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(7797): checking classloading for 0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:29:54,805 INFO [StoreOpener-d964013f8c33adbdff757626b30f3756-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d964013f8c33adbdff757626b30f3756 2024-11-15T08:29:54,805 INFO [StoreOpener-0bfb5df214fbf62f39e23b26ca694374-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:29:54,807 INFO [StoreOpener-0bfb5df214fbf62f39e23b26ca694374-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0bfb5df214fbf62f39e23b26ca694374 columnFamilyName cf 2024-11-15T08:29:54,807 INFO [StoreOpener-d964013f8c33adbdff757626b30f3756-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d964013f8c33adbdff757626b30f3756 columnFamilyName cf 2024-11-15T08:29:54,807 DEBUG [StoreOpener-0bfb5df214fbf62f39e23b26ca694374-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:29:54,807 DEBUG [StoreOpener-d964013f8c33adbdff757626b30f3756-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-15T08:29:54,808 INFO [StoreOpener-0bfb5df214fbf62f39e23b26ca694374-1 {}] regionserver.HStore(327): Store=0bfb5df214fbf62f39e23b26ca694374/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:29:54,808 INFO [StoreOpener-d964013f8c33adbdff757626b30f3756-1 {}] regionserver.HStore(327): Store=d964013f8c33adbdff757626b30f3756/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-15T08:29:54,808 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1038): replaying wal for 0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:29:54,808 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1038): replaying wal for d964013f8c33adbdff757626b30f3756 2024-11-15T08:29:54,809 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:29:54,809 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756 2024-11-15T08:29:54,809 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:29:54,809 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756 2024-11-15T08:29:54,810 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1048): stopping wal replay for 0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:29:54,810 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1060): Cleaning up temporary data for 0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:29:54,810 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1048): stopping wal replay for d964013f8c33adbdff757626b30f3756 2024-11-15T08:29:54,810 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1060): Cleaning up temporary data for d964013f8c33adbdff757626b30f3756 2024-11-15T08:29:54,812 
DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1093): writing seq id for d964013f8c33adbdff757626b30f3756 2024-11-15T08:29:54,812 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1093): writing seq id for 0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:29:54,817 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:29:54,817 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-15T08:29:54,818 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1114): Opened 0bfb5df214fbf62f39e23b26ca694374; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61235435, jitterRate=-0.08752091228961945}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:29:54,818 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:29:54,817 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1114): Opened d964013f8c33adbdff757626b30f3756; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67184924, jitterRate=0.0011333823204040527}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-15T08:29:54,818 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d964013f8c33adbdff757626b30f3756 2024-11-15T08:29:54,818 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegion(1006): Region open journal for 0bfb5df214fbf62f39e23b26ca694374: Running coprocessor pre-open hook at 1731659394791Writing region info on filesystem at 1731659394791Initializing all the Stores at 1731659394793 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659394794 (+1 ms)Cleaning up temporary data from old regions at 1731659394810 (+16 ms)Running coprocessor post-open hooks at 1731659394818 (+8 ms)Region opened successfully at 1731659394818 2024-11-15T08:29:54,818 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegion(1006): Region open journal for d964013f8c33adbdff757626b30f3756: Running coprocessor pre-open hook 
at 1731659394791Writing region info on filesystem at 1731659394791Initializing all the Stores at 1731659394793 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731659394793Cleaning up temporary data from old regions at 1731659394810 (+17 ms)Running coprocessor post-open hooks at 1731659394818 (+8 ms)Region opened successfully at 1731659394818 2024-11-15T08:29:54,820 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374., pid=196, masterSystemTime=1731659394785 2024-11-15T08:29:54,820 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756., pid=197, masterSystemTime=1731659394786 2024-11-15T08:29:54,822 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. 2024-11-15T08:29:54,822 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=197}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. 2024-11-15T08:29:54,822 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=194 updating hbase:meta row=d964013f8c33adbdff757626b30f3756, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:29:54,825 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=194, state=RUNNABLE, hasLock=false; OpenRegionProcedure d964013f8c33adbdff757626b30f3756, server=48e8ad8dc936,37511,1731659175104 because future has completed 2024-11-15T08:29:54,825 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=195 updating hbase:meta row=0bfb5df214fbf62f39e23b26ca694374, regionState=OPEN, openSeqNum=2, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:29:54,828 DEBUG [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. 2024-11-15T08:29:54,828 INFO [RS_OPEN_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_OPEN_REGION, pid=196}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. 
2024-11-15T08:29:54,830 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0bfb5df214fbf62f39e23b26ca694374, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:29:54,834 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=197, resume processing ppid=194 2024-11-15T08:29:54,836 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=194, state=SUCCESS, hasLock=false; OpenRegionProcedure d964013f8c33adbdff757626b30f3756, server=48e8ad8dc936,37511,1731659175104 in 194 msec 2024-11-15T08:29:54,837 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=196, resume processing ppid=195 2024-11-15T08:29:54,837 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, ppid=193, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d964013f8c33adbdff757626b30f3756, ASSIGN in 359 msec 2024-11-15T08:29:54,837 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; OpenRegionProcedure 0bfb5df214fbf62f39e23b26ca694374, server=48e8ad8dc936,40637,1731659175269 in 201 msec 2024-11-15T08:29:54,842 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=195, resume processing ppid=193 2024-11-15T08:29:54,842 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, ppid=193, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0bfb5df214fbf62f39e23b26ca694374, ASSIGN in 361 msec 2024-11-15T08:29:54,843 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-15T08:29:54,843 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659394843"}]},"ts":"1731659394843"} 2024-11-15T08:29:54,845 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-11-15T08:29:54,847 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=193, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-11-15T08:29:54,848 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-11-15T08:29:54,851 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-15T08:29:54,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:29:54,855 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:29:54,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:29:54,856 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:29:54,858 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:29:54,859 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:29:54,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:29:54,860 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:29:54,860 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:29:54,860 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:29:54,860 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-15T08:29:54,860 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-15T08:29:54,863 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-15T08:29:54,863 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data 
PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-15T08:29:54,866 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 462 msec 2024-11-15T08:29:55,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=193 2024-11-15T08:29:55,026 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-15T08:29:55,027 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-11-15T08:29:55,027 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:29:55,034 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-11-15T08:29:55,035 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:29:55,035 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-11-15T08:29:55,035 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-15T08:29:55,042 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-15T08:29:55,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659395042 (current time:1731659395042). 
2024-11-15T08:29:55,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:29:55,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-15T08:29:55,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:29:55,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61e607a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:29:55,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:29:55,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:29:55,047 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:29:55,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:29:55,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:29:55,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ee335cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:29:55,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:29:55,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:29:55,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:29:55,049 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36448, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:29:55,050 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69960d56, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:29:55,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:29:55,051 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:29:55,052 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:29:55,053 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47374, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:29:55,056 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169. 2024-11-15T08:29:55,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:29:55,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:29:55,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:29:55,057 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T08:29:55,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e873e6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:29:55,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:29:55,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:29:55,062 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:29:55,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:29:55,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:29:55,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bbaf06f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:29:55,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:29:55,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:29:55,063 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:29:55,063 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36462, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:29:55,064 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@286f7315, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:29:55,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:29:55,066 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:29:55,067 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:29:55,068 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47380, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T08:29:55,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:29:55,072 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169. 2024-11-15T08:29:55,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor239.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:29:55,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:29:55,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:29:55,072 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T08:29:55,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-15T08:29:55,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T08:29:55,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=198, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-15T08:29:55,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 198 2024-11-15T08:29:55,077 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:29:55,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-15T08:29:55,078 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:29:55,082 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:29:55,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742300_1476 (size=203) 2024-11-15T08:29:55,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742300_1476 (size=203) 2024-11-15T08:29:55,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742300_1476 (size=203) 2024-11-15T08:29:55,106 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:29:55,106 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d964013f8c33adbdff757626b30f3756}, {pid=200, ppid=198, state=RUNNABLE, 
hasLock=false; SnapshotRegionProcedure 0bfb5df214fbf62f39e23b26ca694374}] 2024-11-15T08:29:55,108 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d964013f8c33adbdff757626b30f3756 2024-11-15T08:29:55,108 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:29:55,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-15T08:29:55,261 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=199 2024-11-15T08:29:55,261 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. 2024-11-15T08:29:55,261 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.HRegion(2603): Flush status journal for d964013f8c33adbdff757626b30f3756: 2024-11-15T08:29:55,261 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-15T08:29:55,262 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:29:55,262 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:29:55,262 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T08:29:55,262 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40637 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=200 2024-11-15T08:29:55,264 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. 2024-11-15T08:29:55,264 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.HRegion(2603): Flush status journal for 0bfb5df214fbf62f39e23b26ca694374: 2024-11-15T08:29:55,264 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. 
for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-15T08:29:55,265 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:29:55,265 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:29:55,265 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-15T08:29:55,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742301_1477 (size=82) 2024-11-15T08:29:55,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742301_1477 (size=82) 2024-11-15T08:29:55,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742301_1477 (size=82) 2024-11-15T08:29:55,302 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. 2024-11-15T08:29:55,302 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=199}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=199 2024-11-15T08:29:55,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=199 2024-11-15T08:29:55,303 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region d964013f8c33adbdff757626b30f3756 2024-11-15T08:29:55,303 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=199, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d964013f8c33adbdff757626b30f3756 2024-11-15T08:29:55,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742302_1478 (size=82) 2024-11-15T08:29:55,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742302_1478 (size=82) 2024-11-15T08:29:55,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742302_1478 (size=82) 2024-11-15T08:29:55,314 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. 
2024-11-15T08:29:55,314 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=200}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=200 2024-11-15T08:29:55,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=200 2024-11-15T08:29:55,315 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:29:55,315 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=200, ppid=198, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:29:55,320 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=198, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d964013f8c33adbdff757626b30f3756 in 211 msec 2024-11-15T08:29:55,321 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:29:55,322 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=200, resume processing ppid=198 2024-11-15T08:29:55,322 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:29:55,323 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:29:55,324 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:29:55,324 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, ppid=198, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0bfb5df214fbf62f39e23b26ca694374 in 212 msec 2024-11-15T08:29:55,324 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:29:55,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742303_1479 (size=585) 2024-11-15T08:29:55,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742303_1479 (size=585) 2024-11-15T08:29:55,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742303_1479 (size=585) 2024-11-15T08:29:55,378 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:29:55,391 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:29:55,392 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:29:55,394 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=198, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:29:55,394 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 198 2024-11-15T08:29:55,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-15T08:29:55,397 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=198, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 320 msec 2024-11-15T08:29:55,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-15T08:29:55,707 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-15T08:29:55,715 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='0bfba3a8c8c5c2ddd290a780379d00cee', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756., hostname=48e8ad8dc936,37511,1731659175104, seqNum=2] 2024-11-15T08:29:55,716 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='1ad70dc441d2e81fe01e71a0390a78715', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:29:55,717 DEBUG [RPCClient-NioEventLoopGroup-6-3 
{}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='208767c7f78a8195f6bad743c560d8e22', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:29:55,720 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='38220ca6d8bed4772b4440e99a773abd2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:29:55,724 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37511 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:29:55,728 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40637 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. with WAL disabled. Data may be lost in the event of a crash. 2024-11-15T08:29:55,729 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-15T08:29:55,732 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:29:55,732 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. 2024-11-15T08:29:55,732 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-15T08:29:55,734 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-15T08:29:55,739 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-15T08:29:55,745 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-15T08:29:55,747 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-15T08:29:55,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1731659395747 (current time:1731659395747). 
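
[Illustrative aside, not part of the captured log] The records above show the master accepting a FLUSH-type snapshot request ("snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }"). For reference, a minimal client-side sketch that would issue the same kind of request through the public Admin API is shown below. It is an assumption-laden sketch, not the test's own code (the test drives a mini-cluster via HBaseTestingUtil); it assumes an hbase-site.xml on the classpath pointing at the target cluster, and reuses the table and snapshot names from the log purely for illustration.

    // Sketch only: take a FLUSH-type snapshot through the public HBase client API.
    // Assumes hbase-site.xml is on the classpath; names are copied from the log above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
          // FLUSH-type snapshot, matching "type=FLUSH ttl=0" in the surrounding records;
          // the blocking snapshot() call returns once the master-side SnapshotProcedure completes.
          admin.snapshot(new SnapshotDescription(
              "snaptb0-testExportFileSystemStateWithSkipTmp", table, SnapshotType.FLUSH));
        }
      }
    }

The blocking Admin.snapshot(...) call corresponds to the client behaviour visible in the log, where the admin repeatedly asks the master "Checking to see if procedure is done pid=..." until the SnapshotProcedure reaches SUCCESS.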
2024-11-15T08:29:55,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-15T08:29:55,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-15T08:29:55,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-15T08:29:55,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e5ab3dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:29:55,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:29:55,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:29:55,749 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:29:55,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:29:55,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:29:55,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b3df662, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:29:55,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:29:55,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:29:55,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:29:55,751 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36484, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:29:55,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@746aa2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:29:55,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:29:55,753 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:29:55,753 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:29:55,754 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47384, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-15T08:29:55,755 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169. 2024-11-15T08:29:55,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:29:55,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:29:55,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:29:55,755 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-15T08:29:55,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71112aba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:29:55,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ClusterIdFetcher(90): Going to request 48e8ad8dc936,35169,-1 for getting cluster id 2024-11-15T08:29:55,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-15T08:29:55,757 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ba5ab714-38eb-44a8-a856-45e1f3ffcc35' 2024-11-15T08:29:55,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-15T08:29:55,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ba5ab714-38eb-44a8-a856-45e1f3ffcc35" 2024-11-15T08:29:55,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6021cc5b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:29:55,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [48e8ad8dc936,35169,-1] 2024-11-15T08:29:55,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-15T08:29:55,758 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:29:55,759 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36498, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-15T08:29:55,760 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48cc8694, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-15T08:29:55,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-15T08:29:55,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=48e8ad8dc936,40637,1731659175269, seqNum=-1] 2024-11-15T08:29:55,762 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-15T08:29:55,763 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47398, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-15T08:29:55,764 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., hostname=48e8ad8dc936,40637,1731659175269, seqNum=2] 2024-11-15T08:29:55,766 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169. 2024-11-15T08:29:55,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor239.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-15T08:29:55,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:29:55,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:29:55,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] access.PermissionStorage(613): Read acl: 
entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-15T08:29:55,768 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-15T08:29:55,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-15T08:29:55,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=201, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-15T08:29:55,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 201 2024-11-15T08:29:55,771 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-15T08:29:55,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-15T08:29:55,772 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-15T08:29:55,775 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-15T08:29:55,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742304_1480 (size=198) 2024-11-15T08:29:55,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742304_1480 (size=198) 2024-11-15T08:29:55,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742304_1480 (size=198) 2024-11-15T08:29:55,787 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-15T08:29:55,788 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d964013f8c33adbdff757626b30f3756}, {pid=203, ppid=201, state=RUNNABLE, hasLock=false; 
SnapshotRegionProcedure 0bfb5df214fbf62f39e23b26ca694374}] 2024-11-15T08:29:55,789 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d964013f8c33adbdff757626b30f3756 2024-11-15T08:29:55,789 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:29:55,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-15T08:29:55,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=40637 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=203 2024-11-15T08:29:55,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37511 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-11-15T08:29:55,940 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. 2024-11-15T08:29:55,940 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. 2024-11-15T08:29:55,940 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2902): Flushing d964013f8c33adbdff757626b30f3756 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-15T08:29:55,940 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(2902): Flushing 0bfb5df214fbf62f39e23b26ca694374 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-15T08:29:55,959 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756/.tmp/cf/4fb84e8eeaf34eb8be62c390da691b70 is 71, key is 0baf840b4b9f70b6362dd4337ff5770b/cf:q/1731659395724/Put/seqid=0 2024-11-15T08:29:55,963 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374/.tmp/cf/cac7b9e07601464bab66aac2697681a1 is 71, key is 151445514c352f0b4bde5503056cd3da/cf:q/1731659395728/Put/seqid=0 2024-11-15T08:29:55,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742305_1481 (size=5286) 2024-11-15T08:29:55,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742305_1481 (size=5286) 2024-11-15T08:29:55,983 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742305_1481 (size=5286) 2024-11-15T08:29:55,984 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756/.tmp/cf/4fb84e8eeaf34eb8be62c390da691b70 2024-11-15T08:29:55,998 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756/.tmp/cf/4fb84e8eeaf34eb8be62c390da691b70 as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756/cf/4fb84e8eeaf34eb8be62c390da691b70 2024-11-15T08:29:56,005 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756/cf/4fb84e8eeaf34eb8be62c390da691b70, entries=3, sequenceid=6, filesize=5.2 K 2024-11-15T08:29:56,006 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for d964013f8c33adbdff757626b30f3756 in 66ms, sequenceid=6, compaction requested=false 2024-11-15T08:29:56,006 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-11-15T08:29:56,007 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for d964013f8c33adbdff757626b30f3756: 2024-11-15T08:29:56,007 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-15T08:29:56,007 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:29:56,007 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:29:56,007 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756/cf/4fb84e8eeaf34eb8be62c390da691b70] hfiles 2024-11-15T08:29:56,007 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756/cf/4fb84e8eeaf34eb8be62c390da691b70 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:29:56,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742306_1482 (size=8324) 2024-11-15T08:29:56,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742306_1482 (size=8324) 2024-11-15T08:29:56,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742306_1482 (size=8324) 2024-11-15T08:29:56,038 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374/.tmp/cf/cac7b9e07601464bab66aac2697681a1 2024-11-15T08:29:56,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374/.tmp/cf/cac7b9e07601464bab66aac2697681a1 as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374/cf/cac7b9e07601464bab66aac2697681a1 2024-11-15T08:29:56,058 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374/cf/cac7b9e07601464bab66aac2697681a1, entries=47, sequenceid=6, filesize=8.1 K 2024-11-15T08:29:56,059 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 0bfb5df214fbf62f39e23b26ca694374 in 119ms, sequenceid=6, compaction requested=false 2024-11-15T08:29:56,059 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.HRegion(2603): Flush status journal for 0bfb5df214fbf62f39e23b26ca694374: 2024-11-15T08:29:56,059 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-15T08:29:56,059 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:29:56,059 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-15T08:29:56,059 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374/cf/cac7b9e07601464bab66aac2697681a1] hfiles 2024-11-15T08:29:56,059 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374/cf/cac7b9e07601464bab66aac2697681a1 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:29:56,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-15T08:29:56,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742307_1483 (size=121) 2024-11-15T08:29:56,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742307_1483 (size=121) 2024-11-15T08:29:56,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742307_1483 (size=121) 2024-11-15T08:29:56,091 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. 
2024-11-15T08:29:56,091 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-11-15T08:29:56,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-11-15T08:29:56,092 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region d964013f8c33adbdff757626b30f3756 2024-11-15T08:29:56,092 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d964013f8c33adbdff757626b30f3756 2024-11-15T08:29:56,096 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=201, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d964013f8c33adbdff757626b30f3756 in 306 msec 2024-11-15T08:29:56,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742308_1484 (size=121) 2024-11-15T08:29:56,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742308_1484 (size=121) 2024-11-15T08:29:56,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742308_1484 (size=121) 2024-11-15T08:29:56,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. 
2024-11-15T08:29:56,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/48e8ad8dc936:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=203}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=203 2024-11-15T08:29:56,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster(4169): Remote procedure done, pid=203 2024-11-15T08:29:56,115 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:29:56,115 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=203, ppid=201, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:29:56,120 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=203, resume processing ppid=201 2024-11-15T08:29:56,120 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, ppid=201, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 0bfb5df214fbf62f39e23b26ca694374 in 329 msec 2024-11-15T08:29:56,120 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-15T08:29:56,122 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-15T08:29:56,123 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-15T08:29:56,124 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:29:56,124 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:29:56,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742309_1485 (size=663) 2024-11-15T08:29:56,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742309_1485 (size=663) 2024-11-15T08:29:56,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742309_1485 (size=663) 2024-11-15T08:29:56,151 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp 
table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-15T08:29:56,156 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-15T08:29:56,157 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:29:56,158 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=201, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-15T08:29:56,158 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 201 2024-11-15T08:29:56,163 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=201, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 389 msec 2024-11-15T08:29:56,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=201 2024-11-15T08:29:56,396 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-15T08:29:56,396 INFO [Time-limited test {}] snapshot.TestExportSnapshot(467): HDFS export destination path: hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659396396 2024-11-15T08:29:56,396 INFO [Time-limited test {}] snapshot.TestExportSnapshot(494): tgtFsUri=hdfs://localhost:44023, tgtDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659396396, rawTgtDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659396396, srcFsUri=hdfs://localhost:44023, srcDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:29:56,429 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1085): inputFs=hdfs://localhost:44023, inputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1 2024-11-15T08:29:56,429 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1086): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659396396, skipTmp=true, 
initialOutputSnapshotDir=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659396396/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:29:56,431 INFO [Time-limited test {}] snapshot.ExportSnapshot(1095): Verify the source snapshot's expiration status and integrity. 2024-11-15T08:29:56,436 INFO [Time-limited test {}] snapshot.ExportSnapshot(1153): Copy Snapshot Manifest from hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659396396/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:29:56,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742310_1486 (size=198) 2024-11-15T08:29:56,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742310_1486 (size=198) 2024-11-15T08:29:56,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742310_1486 (size=198) 2024-11-15T08:29:56,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742311_1487 (size=663) 2024-11-15T08:29:56,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742311_1487 (size=663) 2024-11-15T08:29:56,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742311_1487 (size=663) 2024-11-15T08:29:56,867 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:56,867 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:56,867 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:57,943 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-13500052187729402959.jar 2024-11-15T08:29:57,944 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:57,944 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:58,020 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop-16213657360914125440.jar 2024-11-15T08:29:58,020 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:58,020 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:58,021 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:58,021 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:58,021 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:58,021 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-15T08:29:58,021 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-15T08:29:58,021 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-15T08:29:58,022 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-15T08:29:58,022 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-15T08:29:58,022 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-15T08:29:58,022 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-15T08:29:58,022 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-15T08:29:58,022 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-15T08:29:58,023 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-15T08:29:58,023 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-15T08:29:58,023 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-15T08:29:58,023 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:29:58,024 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:29:58,024 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:29:58,024 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:29:58,024 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-15T08:29:58,024 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:29:58,024 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-15T08:29:58,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742312_1488 (size=131440) 2024-11-15T08:29:58,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742312_1488 (size=131440) 2024-11-15T08:29:58,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742312_1488 (size=131440) 2024-11-15T08:29:58,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742313_1489 (size=4188619) 2024-11-15T08:29:58,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742313_1489 (size=4188619) 2024-11-15T08:29:58,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742313_1489 (size=4188619) 2024-11-15T08:29:58,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742314_1490 (size=1323991) 2024-11-15T08:29:58,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742314_1490 (size=1323991) 2024-11-15T08:29:58,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742314_1490 (size=1323991) 2024-11-15T08:29:58,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742315_1491 (size=903738) 2024-11-15T08:29:58,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742315_1491 (size=903738) 2024-11-15T08:29:58,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742315_1491 (size=903738) 2024-11-15T08:29:58,155 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0009_000001 (auth:SIMPLE) from 127.0.0.1:41658 2024-11-15T08:29:58,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742316_1492 (size=8360083) 2024-11-15T08:29:58,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742316_1492 (size=8360083) 2024-11-15T08:29:58,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742316_1492 (size=8360083) 2024-11-15T08:29:58,170 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742317_1493 (size=440656) 2024-11-15T08:29:58,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742317_1493 (size=440656) 2024-11-15T08:29:58,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742317_1493 (size=440656) 2024-11-15T08:29:58,170 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_0/usercache/jenkins/appcache/application_1731659181416_0009/container_1731659181416_0009_01_000001/launch_container.sh] 2024-11-15T08:29:58,171 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_0/usercache/jenkins/appcache/application_1731659181416_0009/container_1731659181416_0009_01_000001/container_tokens] 2024-11-15T08:29:58,171 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_0/usercache/jenkins/appcache/application_1731659181416_0009/container_1731659181416_0009_01_000001/sysfs] 2024-11-15T08:29:58,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742318_1494 (size=1877034) 2024-11-15T08:29:58,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742318_1494 (size=1877034) 2024-11-15T08:29:58,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742318_1494 (size=1877034) 2024-11-15T08:29:58,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742319_1495 (size=77835) 2024-11-15T08:29:58,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742319_1495 (size=77835) 2024-11-15T08:29:58,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742319_1495 (size=77835) 2024-11-15T08:29:58,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742320_1496 (size=6424751) 2024-11-15T08:29:58,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742320_1496 (size=6424751) 2024-11-15T08:29:58,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742320_1496 (size=6424751) 2024-11-15T08:29:58,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742321_1497 (size=30949) 2024-11-15T08:29:58,220 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742321_1497 (size=30949) 2024-11-15T08:29:58,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742321_1497 (size=30949) 2024-11-15T08:29:58,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742322_1498 (size=1597327) 2024-11-15T08:29:58,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742322_1498 (size=1597327) 2024-11-15T08:29:58,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742322_1498 (size=1597327) 2024-11-15T08:29:58,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742323_1499 (size=4695811) 2024-11-15T08:29:58,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742323_1499 (size=4695811) 2024-11-15T08:29:58,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742323_1499 (size=4695811) 2024-11-15T08:29:58,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742324_1500 (size=232957) 2024-11-15T08:29:58,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742324_1500 (size=232957) 2024-11-15T08:29:58,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742324_1500 (size=232957) 2024-11-15T08:29:58,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742325_1501 (size=127628) 2024-11-15T08:29:58,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742325_1501 (size=127628) 2024-11-15T08:29:58,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742325_1501 (size=127628) 2024-11-15T08:29:58,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742326_1502 (size=20406) 2024-11-15T08:29:58,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742326_1502 (size=20406) 2024-11-15T08:29:58,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742326_1502 (size=20406) 2024-11-15T08:29:58,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742327_1503 (size=5175431) 2024-11-15T08:29:58,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742327_1503 (size=5175431) 2024-11-15T08:29:58,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742327_1503 (size=5175431) 
2024-11-15T08:29:58,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742328_1504 (size=217634) 2024-11-15T08:29:58,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742328_1504 (size=217634) 2024-11-15T08:29:58,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742328_1504 (size=217634) 2024-11-15T08:29:58,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742329_1505 (size=1832290) 2024-11-15T08:29:58,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742329_1505 (size=1832290) 2024-11-15T08:29:58,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742329_1505 (size=1832290) 2024-11-15T08:29:58,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742330_1506 (size=322274) 2024-11-15T08:29:58,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742330_1506 (size=322274) 2024-11-15T08:29:58,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742330_1506 (size=322274) 2024-11-15T08:29:58,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742331_1507 (size=503880) 2024-11-15T08:29:58,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742331_1507 (size=503880) 2024-11-15T08:29:58,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742331_1507 (size=503880) 2024-11-15T08:29:58,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742332_1508 (size=29229) 2024-11-15T08:29:58,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742332_1508 (size=29229) 2024-11-15T08:29:58,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742332_1508 (size=29229) 2024-11-15T08:29:58,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742333_1509 (size=24096) 2024-11-15T08:29:58,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742333_1509 (size=24096) 2024-11-15T08:29:58,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742333_1509 (size=24096) 2024-11-15T08:29:58,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742334_1510 (size=111872) 2024-11-15T08:29:58,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742334_1510 
(size=111872) 2024-11-15T08:29:58,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742334_1510 (size=111872) 2024-11-15T08:29:58,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742335_1511 (size=45609) 2024-11-15T08:29:58,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742335_1511 (size=45609) 2024-11-15T08:29:58,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742335_1511 (size=45609) 2024-11-15T08:29:58,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742336_1512 (size=136454) 2024-11-15T08:29:58,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742336_1512 (size=136454) 2024-11-15T08:29:58,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742336_1512 (size=136454) 2024-11-15T08:29:58,762 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-15T08:29:58,764 INFO [Time-limited test {}] snapshot.ExportSnapshot(661): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-11-15T08:29:58,766 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(754): export split=0 size=13.3 K 2024-11-15T08:29:58,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742337_1513 (size=366) 2024-11-15T08:29:58,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742337_1513 (size=366) 2024-11-15T08:29:58,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742337_1513 (size=366) 2024-11-15T08:29:58,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742338_1514 (size=15) 2024-11-15T08:29:58,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742338_1514 (size=15) 2024-11-15T08:29:58,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742338_1514 (size=15) 2024-11-15T08:29:58,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742339_1515 (size=303906) 2024-11-15T08:29:58,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742339_1515 (size=303906) 2024-11-15T08:29:58,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742339_1515 (size=303906) 2024-11-15T08:29:58,828 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-15T08:29:58,829 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-15T08:29:59,147 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0010_000001 (auth:SIMPLE) from 127.0.0.1:41668 2024-11-15T08:29:59,577 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T08:30:02,658 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 4746d1a25f8d5d89e0a76786d0716410, had cached 0 bytes from a total of 5286 2024-11-15T08:30:02,659 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1f91fa0aee2cc478532b519b97469773, had cached 0 bytes from a total of 8324 2024-11-15T08:30:04,710 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:04,710 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-11-15T08:30:04,995 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0010_000001 (auth:SIMPLE) from 127.0.0.1:51634 2024-11-15T08:30:05,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742340_1516 (size=349580) 2024-11-15T08:30:05,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742340_1516 (size=349580) 2024-11-15T08:30:05,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742340_1516 (size=349580) 2024-11-15T08:30:07,429 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0010_000001 (auth:SIMPLE) from 127.0.0.1:42146 2024-11-15T08:30:12,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742341_1517 (size=8324) 2024-11-15T08:30:12,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742341_1517 (size=8324) 2024-11-15T08:30:12,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742341_1517 (size=8324) 2024-11-15T08:30:12,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742342_1518 (size=5286) 2024-11-15T08:30:12,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742342_1518 (size=5286) 2024-11-15T08:30:12,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to 
blk_1073742342_1518 (size=5286) 2024-11-15T08:30:12,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742343_1519 (size=17455) 2024-11-15T08:30:12,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742343_1519 (size=17455) 2024-11-15T08:30:12,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742343_1519 (size=17455) 2024-11-15T08:30:12,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742344_1520 (size=476) 2024-11-15T08:30:12,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742344_1520 (size=476) 2024-11-15T08:30:12,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742344_1520 (size=476) 2024-11-15T08:30:12,381 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_0/usercache/jenkins/appcache/application_1731659181416_0010/container_1731659181416_0010_01_000002/launch_container.sh] 2024-11-15T08:30:12,382 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_0/usercache/jenkins/appcache/application_1731659181416_0010/container_1731659181416_0010_01_000002/container_tokens] 2024-11-15T08:30:12,382 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_0/usercache/jenkins/appcache/application_1731659181416_0010/container_1731659181416_0010_01_000002/sysfs] 2024-11-15T08:30:12,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742345_1521 (size=17455) 2024-11-15T08:30:12,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742345_1521 (size=17455) 2024-11-15T08:30:12,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742345_1521 (size=17455) 2024-11-15T08:30:12,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742346_1522 (size=349580) 2024-11-15T08:30:12,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742346_1522 (size=349580) 2024-11-15T08:30:12,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742346_1522 (size=349580) 2024-11-15T08:30:12,825 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for 
appattempt_1731659181416_0010_000001 (auth:SIMPLE) from 127.0.0.1:41728 2024-11-15T08:30:13,478 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T08:30:13,981 INFO [Time-limited test {}] snapshot.ExportSnapshot(1210): Finalize the Snapshot Export 2024-11-15T08:30:13,981 INFO [Time-limited test {}] snapshot.ExportSnapshot(1221): Verify the exported snapshot's expiration status and integrity. 2024-11-15T08:30:13,987 INFO [Time-limited test {}] snapshot.ExportSnapshot(1227): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:13,987 INFO [Time-limited test {}] snapshot.TestExportSnapshot(361): Exported snapshot 2024-11-15T08:30:13,987 INFO [Time-limited test {}] snapshot.TestExportSnapshot(372): Verified filesystem state 2024-11-15T08:30:13,987 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:13,988 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-15T08:30:13,988 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-15T08:30:13,988 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(447): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_2123625332_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659396396/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659396396/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:13,988 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659396396/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-15T08:30:13,989 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(452): hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/export-test/export-1731659396396/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-15T08:30:13,994 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:13,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=204, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:13,997 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-15T08:30:13,997 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659413997"}]},"ts":"1731659413997"} 2024-11-15T08:30:13,999 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-11-15T08:30:13,999 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-11-15T08:30:14,000 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=205, ppid=204, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-11-15T08:30:14,001 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d964013f8c33adbdff757626b30f3756, UNASSIGN}, {pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0bfb5df214fbf62f39e23b26ca694374, UNASSIGN}] 2024-11-15T08:30:14,002 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d964013f8c33adbdff757626b30f3756, UNASSIGN 2024-11-15T08:30:14,002 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=207, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0bfb5df214fbf62f39e23b26ca694374, UNASSIGN 2024-11-15T08:30:14,003 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=206 updating hbase:meta row=d964013f8c33adbdff757626b30f3756, regionState=CLOSING, regionLocation=48e8ad8dc936,37511,1731659175104 2024-11-15T08:30:14,003 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=207 updating hbase:meta row=0bfb5df214fbf62f39e23b26ca694374, regionState=CLOSING, regionLocation=48e8ad8dc936,40637,1731659175269 2024-11-15T08:30:14,004 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=206, ppid=205, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d964013f8c33adbdff757626b30f3756, UNASSIGN because future has completed 2024-11-15T08:30:14,005 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:30:14,005 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=206, state=RUNNABLE, hasLock=false; CloseRegionProcedure d964013f8c33adbdff757626b30f3756, server=48e8ad8dc936,37511,1731659175104}] 2024-11-15T08:30:14,005 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=207, ppid=205, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0bfb5df214fbf62f39e23b26ca694374, UNASSIGN because future has completed 2024-11-15T08:30:14,005 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-15T08:30:14,006 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=209, ppid=207, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0bfb5df214fbf62f39e23b26ca694374, server=48e8ad8dc936,40637,1731659175269}] 2024-11-15T08:30:14,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-15T08:30:14,157 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(122): Close d964013f8c33adbdff757626b30f3756 2024-11-15T08:30:14,158 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:30:14,158 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1722): Closing d964013f8c33adbdff757626b30f3756, disabling compactions & flushes 2024-11-15T08:30:14,158 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. 2024-11-15T08:30:14,158 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. 2024-11-15T08:30:14,158 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. after waiting 0 ms 2024-11-15T08:30:14,158 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. 2024-11-15T08:30:14,158 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(122): Close 0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:30:14,158 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-15T08:30:14,158 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1722): Closing 0bfb5df214fbf62f39e23b26ca694374, disabling compactions & flushes 2024-11-15T08:30:14,158 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. 
2024-11-15T08:30:14,158 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. 2024-11-15T08:30:14,158 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. after waiting 0 ms 2024-11-15T08:30:14,158 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. 2024-11-15T08:30:14,163 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T08:30:14,163 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:30:14,163 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756. 2024-11-15T08:30:14,163 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-15T08:30:14,163 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] regionserver.HRegion(1676): Region close journal for d964013f8c33adbdff757626b30f3756: Waiting for close lock at 1731659414158Running coprocessor pre-close hooks at 1731659414158Disabling compacts and flushes for region at 1731659414158Disabling writes for close at 1731659414158Writing region close event to WAL at 1731659414159 (+1 ms)Running coprocessor post-close hooks at 1731659414163 (+4 ms)Closed at 1731659414163 2024-11-15T08:30:14,164 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:30:14,164 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374. 
2024-11-15T08:30:14,164 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] regionserver.HRegion(1676): Region close journal for 0bfb5df214fbf62f39e23b26ca694374: Waiting for close lock at 1731659414158Running coprocessor pre-close hooks at 1731659414158Disabling compacts and flushes for region at 1731659414158Disabling writes for close at 1731659414158Writing region close event to WAL at 1731659414160 (+2 ms)Running coprocessor post-close hooks at 1731659414164 (+4 ms)Closed at 1731659414164 2024-11-15T08:30:14,165 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=208}] handler.UnassignRegionHandler(157): Closed d964013f8c33adbdff757626b30f3756 2024-11-15T08:30:14,166 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=206 updating hbase:meta row=d964013f8c33adbdff757626b30f3756, regionState=CLOSED 2024-11-15T08:30:14,166 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION, pid=209}] handler.UnassignRegionHandler(157): Closed 0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:30:14,166 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=207 updating hbase:meta row=0bfb5df214fbf62f39e23b26ca694374, regionState=CLOSED 2024-11-15T08:30:14,167 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=206, state=RUNNABLE, hasLock=false; CloseRegionProcedure d964013f8c33adbdff757626b30f3756, server=48e8ad8dc936,37511,1731659175104 because future has completed 2024-11-15T08:30:14,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0bfb5df214fbf62f39e23b26ca694374, server=48e8ad8dc936,40637,1731659175269 because future has completed 2024-11-15T08:30:14,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=208, resume processing ppid=206 2024-11-15T08:30:14,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=206, state=SUCCESS, hasLock=false; CloseRegionProcedure d964013f8c33adbdff757626b30f3756, server=48e8ad8dc936,37511,1731659175104 in 163 msec 2024-11-15T08:30:14,171 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=209, resume processing ppid=207 2024-11-15T08:30:14,171 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, state=SUCCESS, hasLock=false; CloseRegionProcedure 0bfb5df214fbf62f39e23b26ca694374, server=48e8ad8dc936,40637,1731659175269 in 163 msec 2024-11-15T08:30:14,171 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, ppid=205, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=d964013f8c33adbdff757626b30f3756, UNASSIGN in 169 msec 2024-11-15T08:30:14,173 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=205 2024-11-15T08:30:14,173 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=205, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=0bfb5df214fbf62f39e23b26ca694374, UNASSIGN in 170 msec 2024-11-15T08:30:14,175 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=204 2024-11-15T08:30:14,175 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=204, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 174 msec 2024-11-15T08:30:14,177 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731659414176"}]},"ts":"1731659414176"} 2024-11-15T08:30:14,178 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-11-15T08:30:14,178 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-11-15T08:30:14,180 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 184 msec 2024-11-15T08:30:14,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=204 2024-11-15T08:30:14,316 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-15T08:30:14,317 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] procedure2.ProcedureExecutor(1139): Stored pid=210, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,319 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=210, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,321 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=210, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,323 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40637 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,329 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756 2024-11-15T08:30:14,329 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:30:14,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,330 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,331 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-15T08:30:14,331 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-15T08:30:14,331 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-15T08:30:14,331 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-15T08:30:14,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:30:14,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:30:14,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:30:14,333 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374/recovered.edits] 2024-11-15T08:30:14,333 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756/cf, FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756/recovered.edits] 2024-11-15T08:30:14,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,333 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-15T08:30:14,335 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:30:14,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=210 2024-11-15T08:30:14,335 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:30:14,335 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:30:14,335 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-15T08:30:14,339 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756/cf/4fb84e8eeaf34eb8be62c390da691b70 to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756/cf/4fb84e8eeaf34eb8be62c390da691b70 2024-11-15T08:30:14,342 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756/recovered.edits/9.seqid to 
hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756/recovered.edits/9.seqid 2024-11-15T08:30:14,343 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374/cf/cac7b9e07601464bab66aac2697681a1 to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374/cf/cac7b9e07601464bab66aac2697681a1 2024-11-15T08:30:14,343 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/d964013f8c33adbdff757626b30f3756 2024-11-15T08:30:14,346 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374/recovered.edits/9.seqid to hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374/recovered.edits/9.seqid 2024-11-15T08:30:14,346 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testtb-testExportFileSystemStateWithSkipTmp/0bfb5df214fbf62f39e23b26ca694374 2024-11-15T08:30:14,346 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-11-15T08:30:14,348 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=210, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,351 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-11-15T08:30:14,353 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-11-15T08:30:14,355 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=210, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,355 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 
2024-11-15T08:30:14,355 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659414355"}]},"ts":"9223372036854775807"} 2024-11-15T08:30:14,355 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731659414355"}]},"ts":"9223372036854775807"} 2024-11-15T08:30:14,357 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-15T08:30:14,357 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d964013f8c33adbdff757626b30f3756, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1731659394399.d964013f8c33adbdff757626b30f3756.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 0bfb5df214fbf62f39e23b26ca694374, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1731659394399.0bfb5df214fbf62f39e23b26ca694374.', STARTKEY => '1', ENDKEY => ''}] 2024-11-15T08:30:14,357 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 2024-11-15T08:30:14,357 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731659414357"}]},"ts":"9223372036854775807"} 2024-11-15T08:30:14,359 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-11-15T08:30:14,359 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=210, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,361 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 42 msec 2024-11-15T08:30:14,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=210 2024-11-15T08:30:14,446 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,446 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-15T08:30:14,453 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-15T08:30:14,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,459 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-15T08:30:14,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169 {}] snapshot.SnapshotManager(381): Deleting 
snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:14,492 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=805 (was 801) Potentially hanging thread: HFileArchiver-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1132595896_1 at /127.0.0.1:47920 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33755 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-7014 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2123625332_22 at /127.0.0.1:58522 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1655466367) connection to localhost/127.0.0.1:33755 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: process reaper (pid 29672) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2123625332_22 at /127.0.0.1:33382 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1132595896_1 at /127.0.0.1:58514 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2123625332_22 at /127.0.0.1:47950 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=799 (was 802), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=719 (was 673) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 20), AvailableMemoryMB=1386 (was 1372) - AvailableMemoryMB LEAK? - 2024-11-15T08:30:14,492 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-11-15T08:30:14,492 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 
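The log above closes out testExportFileSystemStateWithSkipTmp: the master finishes the DeleteTableProcedure for testtb-testExportFileSystemStateWithSkipTmp, then services two snapshot delete requests (emptySnaptb0-... and snaptb0-...), and the ResourceChecker prints its after-test thread and file-descriptor accounting. For orientation only, here is a minimal, hypothetical sketch of the client-side cleanup that produces this kind of server activity, written against the public HBase Admin API; the table and snapshot names are taken from the log, while the class name, connection setup, and control flow are assumptions, not the test's actual code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical cleanup sketch: disable/delete the test table and drop its
// snapshots, mirroring the table DELETE operation and the two
// "delete name: ... type: DISABLED" snapshot requests seen in the log.
public class SnapshotTestCleanup {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumed to point at the test cluster
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table); // a table must be disabled before it can be deleted
        }
        // Server side, the master runs this as a DeleteTableProcedure
        // (pid=210 in the log above): regions are archived, META rows removed.
        admin.deleteTable(table);
      }
      // Remove the snapshots taken by the test.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSkipTmp");
    }
  }
}
```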
2024-11-15T08:30:14,501 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@714e385f{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-15T08:30:14,504 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2696985a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T08:30:14,504 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T08:30:14,504 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@607efe43{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-15T08:30:14,505 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c70a759{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.log.dir/,STOPPED} 2024-11-15T08:30:14,710 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-15T08:30:16,962 WARN [regionserver/48e8ad8dc936:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 4, running: 1 2024-11-15T08:30:18,901 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1731659181416_0010_000001 (auth:SIMPLE) from 127.0.0.1:41744 2024-11-15T08:30:18,913 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_2/usercache/jenkins/appcache/application_1731659181416_0010/container_1731659181416_0010_01_000001/launch_container.sh] 2024-11-15T08:30:18,913 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_2/usercache/jenkins/appcache/application_1731659181416_0010/container_1731659181416_0010_01_000001/container_tokens] 2024-11-15T08:30:18,913 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1970259410/yarn-4743498861/MiniMRCluster_1970259410-localDir-nm-0_2/usercache/jenkins/appcache/application_1731659181416_0010/container_1731659181416_0010_01_000001/sysfs] 2024-11-15T08:30:19,697 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T08:30:31,518 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4979e977{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 
2024-11-15T08:30:31,519 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a1a80b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T08:30:31,519 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T08:30:31,519 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57c6a7f7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-15T08:30:31,519 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@fefcaab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.log.dir/,STOPPED} 2024-11-15T08:30:43,478 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T08:30:47,658 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 4746d1a25f8d5d89e0a76786d0716410, had cached 0 bytes from a total of 5286 2024-11-15T08:30:47,659 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1f91fa0aee2cc478532b519b97469773, had cached 0 bytes from a total of 8324 2024-11-15T08:30:48,528 ERROR [Thread[Thread-403,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-15T08:30:48,528 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@f629ad8{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-15T08:30:48,529 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@67837fd5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T08:30:48,529 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T08:30:48,529 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7709f05b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-15T08:30:48,529 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2526877c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.log.dir/,STOPPED} 2024-11-15T08:30:48,533 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 
2024-11-15T08:30:48,537 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-11-15T08:30:48,537 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-11-15T08:30:48,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741830_1006 (size=969041) 2024-11-15T08:30:48,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741830_1006 (size=969041) 2024-11-15T08:30:48,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741830_1006 (size=969041) 2024-11-15T08:30:48,542 ERROR [Thread[Thread-426,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-15T08:30:48,545 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@86a27da{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-15T08:30:48,546 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d3325dd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-15T08:30:48,547 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-15T08:30:48,547 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74822f8a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-15T08:30:48,547 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a6506b2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.log.dir/,STOPPED} 2024-11-15T08:30:48,549 ERROR [Thread[Thread-385,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-15T08:30:48,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-11-15T08:30:48,549 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-15T08:30:48,549 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T08:30:48,549 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T08:30:48,549 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:30:48,549 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:30:48,549 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-15T08:30:48,549 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
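The call stack above records the teardown path: TestExportSnapshot.tearDownAfterClass invokes HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection before stopping the HBase minicluster, and the earlier "Stopping mini mapreduce cluster..." / "Mini mapreduce cluster stopped" lines show the same utility tearing down the mini MapReduce/YARN cluster first. Below is a sketch of what such a JUnit 4 @AfterClass hook typically looks like, assuming the test class keeps a static HBaseTestingUtil named TEST_UTIL; the field name and the MapReduce shutdown method name are assumptions inferred from the log messages, while shutdownMiniCluster() is directly visible in the stack trace.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;

// Sketch of the teardown sequence implied by the log: stop the mini
// MapReduce/YARN cluster first, then shut down the HBase minicluster,
// which closes the cached cluster connection and stops master and regionservers.
public class ExportSnapshotTeardownSketch {
  // Assumed: the test class holds a shared testing utility like this.
  private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniMapReduceCluster(); // assumed name, matching "Stopping mini mapreduce cluster..."
    TEST_UTIL.shutdownMiniCluster();          // shown in the call stack above (HBaseTestingUtil.java:1020)
  }
}
```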
2024-11-15T08:30:48,549 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1563154113, stopped=false 2024-11-15T08:30:48,550 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:30:48,550 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-15T08:30:48,550 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=48e8ad8dc936,35169,1731659174197 2024-11-15T08:30:48,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T08:30:48,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T08:30:48,552 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T08:30:48,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T08:30:48,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-15T08:30:48,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:30:48,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:30:48,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:30:48,552 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:30:48,552 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-15T08:30:48,552 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T08:30:48,552 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:30:48,553 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T08:30:48,553 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T08:30:48,553 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-15T08:30:48,553 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '48e8ad8dc936,37511,1731659175104' ***** 2024-11-15T08:30:48,553 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Set watcher on znode that does not yet 
exist, /hbase/running 2024-11-15T08:30:48,553 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:30:48,553 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T08:30:48,554 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '48e8ad8dc936,40637,1731659175269' ***** 2024-11-15T08:30:48,554 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:30:48,554 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T08:30:48,554 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T08:30:48,554 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '48e8ad8dc936,34555,1731659175344' ***** 2024-11-15T08:30:48,554 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:30:48,554 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-15T08:30:48,554 INFO [RS:0;48e8ad8dc936:37511 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T08:30:48,554 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T08:30:48,554 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T08:30:48,554 INFO [RS:1;48e8ad8dc936:40637 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T08:30:48,554 INFO [RS:0;48e8ad8dc936:37511 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T08:30:48,554 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T08:30:48,554 INFO [RS:1;48e8ad8dc936:40637 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-15T08:30:48,554 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(3091): Received CLOSE for 626e7c98f2efa065104d27dc4f61e6c7 2024-11-15T08:30:48,554 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(3091): Received CLOSE for 4746d1a25f8d5d89e0a76786d0716410 2024-11-15T08:30:48,554 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-15T08:30:48,554 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-15T08:30:48,554 INFO [RS:2;48e8ad8dc936:34555 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-15T08:30:48,554 INFO [RS:2;48e8ad8dc936:34555 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-15T08:30:48,555 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(3091): Received CLOSE for 1f91fa0aee2cc478532b519b97469773 2024-11-15T08:30:48,555 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(959): stopping server 48e8ad8dc936,40637,1731659175269 2024-11-15T08:30:48,555 INFO [RS:1;48e8ad8dc936:40637 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T08:30:48,555 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(959): stopping server 48e8ad8dc936,34555,1731659175344 2024-11-15T08:30:48,555 INFO [RS:1;48e8ad8dc936:40637 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;48e8ad8dc936:40637. 2024-11-15T08:30:48,555 INFO [RS:2;48e8ad8dc936:34555 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T08:30:48,555 DEBUG [RS:1;48e8ad8dc936:40637 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T08:30:48,555 DEBUG [RS:1;48e8ad8dc936:40637 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:30:48,555 INFO [RS:2;48e8ad8dc936:34555 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;48e8ad8dc936:34555. 
2024-11-15T08:30:48,555 DEBUG [RS:2;48e8ad8dc936:34555 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T08:30:48,555 DEBUG [RS:2;48e8ad8dc936:34555 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:30:48,555 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T08:30:48,555 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T08:30:48,555 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1f91fa0aee2cc478532b519b97469773, disabling compactions & flushes 2024-11-15T08:30:48,555 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T08:30:48,555 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. 2024-11-15T08:30:48,555 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. 2024-11-15T08:30:48,555 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 626e7c98f2efa065104d27dc4f61e6c7, disabling compactions & flushes 2024-11-15T08:30:48,555 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-15T08:30:48,555 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-15T08:30:48,555 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. 
after waiting 0 ms 2024-11-15T08:30:48,555 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. 2024-11-15T08:30:48,555 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7. 2024-11-15T08:30:48,555 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7. 2024-11-15T08:30:48,555 DEBUG [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(1325): Online Regions={1f91fa0aee2cc478532b519b97469773=testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773.} 2024-11-15T08:30:48,555 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7. after waiting 0 ms 2024-11-15T08:30:48,555 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7. 2024-11-15T08:30:48,555 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(959): stopping server 48e8ad8dc936,37511,1731659175104 2024-11-15T08:30:48,555 INFO [RS:0;48e8ad8dc936:37511 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T08:30:48,555 INFO [RS:0;48e8ad8dc936:37511 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;48e8ad8dc936:37511. 
2024-11-15T08:30:48,555 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 626e7c98f2efa065104d27dc4f61e6c7 1/1 column families, dataSize=1.38 KB heapSize=3.33 KB 2024-11-15T08:30:48,555 DEBUG [RS:0;48e8ad8dc936:37511 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-15T08:30:48,555 DEBUG [RS:0;48e8ad8dc936:37511 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:30:48,555 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4746d1a25f8d5d89e0a76786d0716410, disabling compactions & flushes 2024-11-15T08:30:48,555 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. 2024-11-15T08:30:48,556 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. 2024-11-15T08:30:48,556 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-15T08:30:48,556 DEBUG [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(1351): Waiting on 1f91fa0aee2cc478532b519b97469773 2024-11-15T08:30:48,556 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. after waiting 0 ms 2024-11-15T08:30:48,556 DEBUG [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(1325): Online Regions={4746d1a25f8d5d89e0a76786d0716410=testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410.} 2024-11-15T08:30:48,556 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. 
2024-11-15T08:30:48,556 DEBUG [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(1351): Waiting on 4746d1a25f8d5d89e0a76786d0716410 2024-11-15T08:30:48,557 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-15T08:30:48,557 DEBUG [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 626e7c98f2efa065104d27dc4f61e6c7=hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7.} 2024-11-15T08:30:48,557 DEBUG [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 626e7c98f2efa065104d27dc4f61e6c7 2024-11-15T08:30:48,557 DEBUG [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-15T08:30:48,557 INFO [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-15T08:30:48,557 DEBUG [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-15T08:30:48,557 DEBUG [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-15T08:30:48,557 DEBUG [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-15T08:30:48,557 INFO [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=69.66 KB heapSize=111.04 KB 2024-11-15T08:30:48,564 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/1f91fa0aee2cc478532b519b97469773/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-15T08:30:48,564 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/default/testExportExpiredSnapshot/4746d1a25f8d5d89e0a76786d0716410/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-15T08:30:48,564 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:30:48,565 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. 
2024-11-15T08:30:48,565 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:30:48,565 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1f91fa0aee2cc478532b519b97469773: Waiting for close lock at 1731659448555Running coprocessor pre-close hooks at 1731659448555Disabling compacts and flushes for region at 1731659448555Disabling writes for close at 1731659448555Writing region close event to WAL at 1731659448560 (+5 ms)Running coprocessor post-close hooks at 1731659448564 (+4 ms)Closed at 1731659448564 2024-11-15T08:30:48,565 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. 2024-11-15T08:30:48,565 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4746d1a25f8d5d89e0a76786d0716410: Waiting for close lock at 1731659448555Running coprocessor pre-close hooks at 1731659448555Disabling compacts and flushes for region at 1731659448555Disabling writes for close at 1731659448556 (+1 ms)Writing region close event to WAL at 1731659448560 (+4 ms)Running coprocessor post-close hooks at 1731659448564 (+4 ms)Closed at 1731659448565 (+1 ms) 2024-11-15T08:30:48,565 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773. 2024-11-15T08:30:48,565 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1731659312308.4746d1a25f8d5d89e0a76786d0716410. 
2024-11-15T08:30:48,574 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/acl/626e7c98f2efa065104d27dc4f61e6c7/.tmp/l/598a565ec9684069ae8d0cdea7287b68 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1731659310350/DeleteFamily/seqid=0 2024-11-15T08:30:48,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742347_1523 (size=5695) 2024-11-15T08:30:48,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742347_1523 (size=5695) 2024-11-15T08:30:48,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742347_1523 (size=5695) 2024-11-15T08:30:48,580 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.38 KB at sequenceid=27 (bloomFilter=false), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/acl/626e7c98f2efa065104d27dc4f61e6c7/.tmp/l/598a565ec9684069ae8d0cdea7287b68 2024-11-15T08:30:48,581 DEBUG [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/.tmp/info/7c83745a2a584fa0a747ebb21db89021 is 173, key is testExportExpiredSnapshot,1,1731659312308.1f91fa0aee2cc478532b519b97469773./info:regioninfo/1731659312670/Put/seqid=0 2024-11-15T08:30:48,584 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 598a565ec9684069ae8d0cdea7287b68 2024-11-15T08:30:48,585 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/acl/626e7c98f2efa065104d27dc4f61e6c7/.tmp/l/598a565ec9684069ae8d0cdea7287b68 as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/acl/626e7c98f2efa065104d27dc4f61e6c7/l/598a565ec9684069ae8d0cdea7287b68 2024-11-15T08:30:48,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742348_1524 (size=14362) 2024-11-15T08:30:48,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742348_1524 (size=14362) 2024-11-15T08:30:48,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742348_1524 (size=14362) 2024-11-15T08:30:48,589 INFO [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=59.12 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/.tmp/info/7c83745a2a584fa0a747ebb21db89021 2024-11-15T08:30:48,591 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 598a565ec9684069ae8d0cdea7287b68 2024-11-15T08:30:48,591 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/acl/626e7c98f2efa065104d27dc4f61e6c7/l/598a565ec9684069ae8d0cdea7287b68, entries=12, sequenceid=27, filesize=5.6 K 2024-11-15T08:30:48,591 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 626e7c98f2efa065104d27dc4f61e6c7 in 36ms, sequenceid=27, compaction requested=false 2024-11-15T08:30:48,594 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/acl/626e7c98f2efa065104d27dc4f61e6c7/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1 2024-11-15T08:30:48,595 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:30:48,595 INFO [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7. 2024-11-15T08:30:48,595 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 626e7c98f2efa065104d27dc4f61e6c7: Waiting for close lock at 1731659448555Running coprocessor pre-close hooks at 1731659448555Disabling compacts and flushes for region at 1731659448555Disabling writes for close at 1731659448555Obtaining lock to block concurrent updates at 1731659448555Preparing flush snapshotting stores in 626e7c98f2efa065104d27dc4f61e6c7 at 1731659448555Finished memstore snapshotting hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7., syncing WAL and waiting on mvcc, flushsize=dataSize=1412, getHeapSize=3392, getOffHeapSize=0, getCellsCount=23 at 1731659448556 (+1 ms)Flushing stores of hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7. at 1731659448556Flushing 626e7c98f2efa065104d27dc4f61e6c7/l: creating writer at 1731659448557 (+1 ms)Flushing 626e7c98f2efa065104d27dc4f61e6c7/l: appending metadata at 1731659448574 (+17 ms)Flushing 626e7c98f2efa065104d27dc4f61e6c7/l: closing flushed file at 1731659448574Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2f845692: reopening flushed file at 1731659448584 (+10 ms)Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 626e7c98f2efa065104d27dc4f61e6c7 in 36ms, sequenceid=27, compaction requested=false at 1731659448591 (+7 ms)Writing region close event to WAL at 1731659448592 (+1 ms)Running coprocessor post-close hooks at 1731659448595 (+3 ms)Closed at 1731659448595 2024-11-15T08:30:48,595 DEBUG [RS_CLOSE_REGION-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1731659177999.626e7c98f2efa065104d27dc4f61e6c7. 
2024-11-15T08:30:48,602 INFO [regionserver/48e8ad8dc936:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T08:30:48,607 DEBUG [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/.tmp/ns/0ec7887bab8c42928fead97fd464a7f9 is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247./ns:/1731659310369/DeleteFamily/seqid=0 2024-11-15T08:30:48,610 INFO [regionserver/48e8ad8dc936:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T08:30:48,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742349_1525 (size=7779) 2024-11-15T08:30:48,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742349_1525 (size=7779) 2024-11-15T08:30:48,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742349_1525 (size=7779) 2024-11-15T08:30:48,612 INFO [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.23 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/.tmp/ns/0ec7887bab8c42928fead97fd464a7f9 2024-11-15T08:30:48,618 INFO [regionserver/48e8ad8dc936:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T08:30:48,630 DEBUG [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/.tmp/rep_barrier/7cb6927c765b42c586d7447acc4f9433 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247./rep_barrier:/1731659310369/DeleteFamily/seqid=0 2024-11-15T08:30:48,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742350_1526 (size=8005) 2024-11-15T08:30:48,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742350_1526 (size=8005) 2024-11-15T08:30:48,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742350_1526 (size=8005) 2024-11-15T08:30:48,635 INFO [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.34 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/.tmp/rep_barrier/7cb6927c765b42c586d7447acc4f9433 2024-11-15T08:30:48,653 DEBUG [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/.tmp/table/2c5e7e0debbe4b199ae284da39b6eff7 is 127, key is 
testtb-testExportFileSystemStateWithMergeRegion-1,,1731659292476.586daedb5286d7e2974fc4cbcd969247./table:/1731659310369/DeleteFamily/seqid=0 2024-11-15T08:30:48,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742351_1527 (size=8758) 2024-11-15T08:30:48,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742351_1527 (size=8758) 2024-11-15T08:30:48,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742351_1527 (size=8758) 2024-11-15T08:30:48,658 INFO [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.97 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/.tmp/table/2c5e7e0debbe4b199ae284da39b6eff7 2024-11-15T08:30:48,663 DEBUG [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/.tmp/info/7c83745a2a584fa0a747ebb21db89021 as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/info/7c83745a2a584fa0a747ebb21db89021 2024-11-15T08:30:48,666 INFO [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/info/7c83745a2a584fa0a747ebb21db89021, entries=74, sequenceid=199, filesize=14.0 K 2024-11-15T08:30:48,667 DEBUG [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/.tmp/ns/0ec7887bab8c42928fead97fd464a7f9 as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/ns/0ec7887bab8c42928fead97fd464a7f9 2024-11-15T08:30:48,670 INFO [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/ns/0ec7887bab8c42928fead97fd464a7f9, entries=23, sequenceid=199, filesize=7.6 K 2024-11-15T08:30:48,670 DEBUG [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/.tmp/rep_barrier/7cb6927c765b42c586d7447acc4f9433 as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/rep_barrier/7cb6927c765b42c586d7447acc4f9433 2024-11-15T08:30:48,674 INFO [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/rep_barrier/7cb6927c765b42c586d7447acc4f9433, entries=21, sequenceid=199, filesize=7.8 K 2024-11-15T08:30:48,674 DEBUG [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/.tmp/table/2c5e7e0debbe4b199ae284da39b6eff7 as hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/table/2c5e7e0debbe4b199ae284da39b6eff7 2024-11-15T08:30:48,677 INFO [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/table/2c5e7e0debbe4b199ae284da39b6eff7, entries=36, sequenceid=199, filesize=8.6 K 2024-11-15T08:30:48,678 INFO [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~69.66 KB/71334, heapSize ~110.98 KB/113640, currentSize=0 B/0 for 1588230740 in 121ms, sequenceid=199, compaction requested=false 2024-11-15T08:30:48,682 DEBUG [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/data/hbase/meta/1588230740/recovered.edits/202.seqid, newMaxSeqId=202, maxSeqId=1 2024-11-15T08:30:48,682 DEBUG [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:30:48,682 DEBUG [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T08:30:48,682 INFO [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-15T08:30:48,682 DEBUG [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731659448557Running coprocessor pre-close hooks at 1731659448557Disabling compacts and flushes for region at 1731659448557Disabling writes for close at 1731659448557Obtaining lock to block concurrent updates at 1731659448557Preparing flush snapshotting stores in 1588230740 at 1731659448557Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=71334, getHeapSize=113640, getOffHeapSize=0, getCellsCount=548 at 1731659448558 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731659448558Flushing 1588230740/info: creating writer at 1731659448558Flushing 1588230740/info: appending metadata at 1731659448580 (+22 ms)Flushing 1588230740/info: closing flushed file at 1731659448580Flushing 1588230740/ns: creating writer at 1731659448593 (+13 ms)Flushing 1588230740/ns: appending metadata at 1731659448607 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731659448607Flushing 1588230740/rep_barrier: creating writer at 1731659448616 (+9 ms)Flushing 1588230740/rep_barrier: appending metadata at 1731659448630 (+14 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1731659448630Flushing 1588230740/table: creating writer at 1731659448639 (+9 ms)Flushing 1588230740/table: appending metadata at 1731659448653 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731659448653Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4cfcddf3: 
reopening flushed file at 1731659448662 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@596c2480: reopening flushed file at 1731659448666 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15518cea: reopening flushed file at 1731659448670 (+4 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@455df97d: reopening flushed file at 1731659448674 (+4 ms)Finished flush of dataSize ~69.66 KB/71334, heapSize ~110.98 KB/113640, currentSize=0 B/0 for 1588230740 in 121ms, sequenceid=199, compaction requested=false at 1731659448678 (+4 ms)Writing region close event to WAL at 1731659448679 (+1 ms)Running coprocessor post-close hooks at 1731659448682 (+3 ms)Closed at 1731659448682 2024-11-15T08:30:48,682 DEBUG [RS_CLOSE_META-regionserver/48e8ad8dc936:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-15T08:30:48,756 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(976): stopping server 48e8ad8dc936,34555,1731659175344; all regions closed. 2024-11-15T08:30:48,756 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(976): stopping server 48e8ad8dc936,37511,1731659175104; all regions closed. 2024-11-15T08:30:48,757 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(976): stopping server 48e8ad8dc936,40637,1731659175269; all regions closed. 2024-11-15T08:30:48,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741836_1012 (size=81723) 2024-11-15T08:30:48,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741833_1009 (size=11950) 2024-11-15T08:30:48,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741836_1012 (size=81723) 2024-11-15T08:30:48,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741834_1010 (size=9269) 2024-11-15T08:30:48,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741836_1012 (size=81723) 2024-11-15T08:30:48,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741833_1009 (size=11950) 2024-11-15T08:30:48,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741834_1010 (size=9269) 2024-11-15T08:30:48,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741834_1010 (size=9269) 2024-11-15T08:30:48,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741833_1009 (size=11950) 2024-11-15T08:30:48,764 DEBUG [RS:2;48e8ad8dc936:34555 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/oldWALs 2024-11-15T08:30:48,764 DEBUG [RS:1;48e8ad8dc936:40637 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/oldWALs 2024-11-15T08:30:48,764 DEBUG [RS:0;48e8ad8dc936:37511 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/oldWALs 2024-11-15T08:30:48,764 INFO 
[RS:2;48e8ad8dc936:34555 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 48e8ad8dc936%2C34555%2C1731659175344:(num 1731659177140) 2024-11-15T08:30:48,764 INFO [RS:1;48e8ad8dc936:40637 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 48e8ad8dc936%2C40637%2C1731659175269.meta:.meta(num 1731659177542) 2024-11-15T08:30:48,764 INFO [RS:0;48e8ad8dc936:37511 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 48e8ad8dc936%2C37511%2C1731659175104:(num 1731659177139) 2024-11-15T08:30:48,764 DEBUG [RS:2;48e8ad8dc936:34555 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:30:48,764 DEBUG [RS:0;48e8ad8dc936:37511 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:30:48,764 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T08:30:48,764 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T08:30:48,764 INFO [RS:0;48e8ad8dc936:37511 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T08:30:48,764 INFO [RS:2;48e8ad8dc936:34555 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T08:30:48,765 INFO [RS:0;48e8ad8dc936:37511 {}] hbase.ChoreService(370): Chore service for: regionserver/48e8ad8dc936:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-15T08:30:48,765 INFO [RS:2;48e8ad8dc936:34555 {}] hbase.ChoreService(370): Chore service for: regionserver/48e8ad8dc936:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-15T08:30:48,765 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T08:30:48,765 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-15T08:30:48,765 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T08:30:48,765 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-15T08:30:48,765 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T08:30:48,765 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-15T08:30:48,765 INFO [RS:0;48e8ad8dc936:37511 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T08:30:48,765 INFO [regionserver/48e8ad8dc936:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-15T08:30:48,765 INFO [RS:2;48e8ad8dc936:34555 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T08:30:48,765 INFO [regionserver/48e8ad8dc936:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T08:30:48,766 INFO [RS:0;48e8ad8dc936:37511 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37511 2024-11-15T08:30:48,766 INFO [RS:2;48e8ad8dc936:34555 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34555 2024-11-15T08:30:48,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073741835_1011 (size=15734) 2024-11-15T08:30:48,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073741835_1011 (size=15734) 2024-11-15T08:30:48,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073741835_1011 (size=15734) 2024-11-15T08:30:48,770 DEBUG [RS:1;48e8ad8dc936:40637 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/oldWALs 2024-11-15T08:30:48,770 INFO [RS:1;48e8ad8dc936:40637 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 48e8ad8dc936%2C40637%2C1731659175269:(num 1731659177140) 2024-11-15T08:30:48,770 DEBUG [RS:1;48e8ad8dc936:40637 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-15T08:30:48,770 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.LeaseManager(133): Closed leases 2024-11-15T08:30:48,770 INFO [RS:1;48e8ad8dc936:40637 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T08:30:48,770 INFO [RS:1;48e8ad8dc936:40637 {}] hbase.ChoreService(370): Chore service for: regionserver/48e8ad8dc936:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-15T08:30:48,770 INFO [RS:1;48e8ad8dc936:40637 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T08:30:48,771 INFO [regionserver/48e8ad8dc936:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-15T08:30:48,771 INFO [RS:1;48e8ad8dc936:40637 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40637 2024-11-15T08:30:48,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-15T08:30:48,772 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/48e8ad8dc936,34555,1731659175344 2024-11-15T08:30:48,772 INFO [RS:2;48e8ad8dc936:34555 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T08:30:48,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/48e8ad8dc936,37511,1731659175104 2024-11-15T08:30:48,773 INFO [RS:0;48e8ad8dc936:37511 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T08:30:48,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/48e8ad8dc936,40637,1731659175269 2024-11-15T08:30:48,773 INFO [RS:1;48e8ad8dc936:40637 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-15T08:30:48,774 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [48e8ad8dc936,37511,1731659175104] 2024-11-15T08:30:48,776 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/48e8ad8dc936,37511,1731659175104 already deleted, retry=false 2024-11-15T08:30:48,777 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 48e8ad8dc936,37511,1731659175104 expired; onlineServers=2 2024-11-15T08:30:48,777 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [48e8ad8dc936,34555,1731659175344] 2024-11-15T08:30:48,778 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/48e8ad8dc936,34555,1731659175344 already deleted, retry=false 2024-11-15T08:30:48,778 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 48e8ad8dc936,34555,1731659175344 expired; onlineServers=1 2024-11-15T08:30:48,778 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [48e8ad8dc936,40637,1731659175269] 2024-11-15T08:30:48,779 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/48e8ad8dc936,40637,1731659175269 already deleted, retry=false 2024-11-15T08:30:48,779 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 48e8ad8dc936,40637,1731659175269 expired; onlineServers=0 2024-11-15T08:30:48,779 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '48e8ad8dc936,35169,1731659174197' ***** 2024-11-15T08:30:48,779 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-15T08:30:48,779 INFO [M:0;48e8ad8dc936:35169 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-15T08:30:48,779 INFO [M:0;48e8ad8dc936:35169 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-15T08:30:48,780 DEBUG [M:0;48e8ad8dc936:35169 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-15T08:30:48,780 DEBUG [M:0;48e8ad8dc936:35169 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-15T08:30:48,780 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-15T08:30:48,780 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster-HFileCleaner.large.0-1731659176711 {}] cleaner.HFileCleaner(306): Exit Thread[master/48e8ad8dc936:0:becomeActiveMaster-HFileCleaner.large.0-1731659176711,5,FailOnTimeoutGroup] 2024-11-15T08:30:48,780 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster-HFileCleaner.small.0-1731659176716 {}] cleaner.HFileCleaner(306): Exit Thread[master/48e8ad8dc936:0:becomeActiveMaster-HFileCleaner.small.0-1731659176716,5,FailOnTimeoutGroup] 2024-11-15T08:30:48,780 INFO [M:0;48e8ad8dc936:35169 {}] hbase.ChoreService(370): Chore service for: master/48e8ad8dc936:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-15T08:30:48,780 INFO [M:0;48e8ad8dc936:35169 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-15T08:30:48,780 DEBUG [M:0;48e8ad8dc936:35169 {}] master.HMaster(1795): Stopping service threads 2024-11-15T08:30:48,780 INFO [M:0;48e8ad8dc936:35169 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-15T08:30:48,780 INFO [M:0;48e8ad8dc936:35169 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-15T08:30:48,781 INFO [M:0;48e8ad8dc936:35169 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-15T08:30:48,781 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-15T08:30:48,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-15T08:30:48,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-15T08:30:48,781 DEBUG [M:0;48e8ad8dc936:35169 {}] zookeeper.ZKUtil(347): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-15T08:30:48,781 WARN [M:0;48e8ad8dc936:35169 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-15T08:30:48,782 INFO [M:0;48e8ad8dc936:35169 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/.lastflushedseqids 2024-11-15T08:30:48,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32865 is added to blk_1073742352_1528 (size=329) 2024-11-15T08:30:48,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37633 is added to blk_1073742352_1528 (size=329) 2024-11-15T08:30:48,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38947 is added to blk_1073742352_1528 (size=329) 2024-11-15T08:30:48,794 INFO [M:0;48e8ad8dc936:35169 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-15T08:30:48,795 INFO [M:0;48e8ad8dc936:35169 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-15T08:30:48,795 DEBUG [M:0;48e8ad8dc936:35169 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-15T08:30:48,815 INFO [M:0;48e8ad8dc936:35169 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T08:30:48,815 DEBUG [M:0;48e8ad8dc936:35169 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-15T08:30:48,815 DEBUG [M:0;48e8ad8dc936:35169 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-15T08:30:48,815 DEBUG [M:0;48e8ad8dc936:35169 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-15T08:30:48,815 INFO [M:0;48e8ad8dc936:35169 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=823.85 KB heapSize=988.65 KB 2024-11-15T08:30:48,816 ERROR [AsyncFSWAL-0-hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData-prefix:48e8ad8dc936,35169,1731659174197 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData-prefix:48e8ad8dc936,35169,1731659174197,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T08:30:48,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T08:30:48,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T08:30:48,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34555-0x1011abbf93d0003, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T08:30:48,875 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37511-0x1011abbf93d0001, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T08:30:48,876 INFO [RS:2;48e8ad8dc936:34555 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T08:30:48,876 INFO [RS:0;48e8ad8dc936:37511 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T08:30:48,876 INFO [RS:0;48e8ad8dc936:37511 {}] regionserver.HRegionServer(1031): Exiting; stopping=48e8ad8dc936,37511,1731659175104; zookeeper connection closed. 2024-11-15T08:30:48,876 INFO [RS:2;48e8ad8dc936:34555 {}] regionserver.HRegionServer(1031): Exiting; stopping=48e8ad8dc936,34555,1731659175344; zookeeper connection closed. 
2024-11-15T08:30:48,876 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@47b194d9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@47b194d9 2024-11-15T08:30:48,876 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@25d428b8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@25d428b8 2024-11-15T08:30:48,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T08:30:48,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40637-0x1011abbf93d0002, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-15T08:30:48,877 INFO [RS:1;48e8ad8dc936:40637 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-15T08:30:48,877 INFO [RS:1;48e8ad8dc936:40637 {}] regionserver.HRegionServer(1031): Exiting; stopping=48e8ad8dc936,40637,1731659175269; zookeeper connection closed. 2024-11-15T08:30:48,877 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6fd58ecd {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6fd58ecd 2024-11-15T08:30:48,877 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-15T08:30:54,092 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T08:30:54,710 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:30:54,710 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-15T08:30:54,711 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-15T08:30:54,711 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-15T08:30:54,711 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-15T08:30:54,711 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:30:54,711 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-15T08:30:54,711 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-15T08:31:00,212 WARN [HBase-Metrics2-1 {}] 
impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T08:31:13,478 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T08:31:15,473 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-11-15T08:31:15,473 DEBUG [master/48e8ad8dc936:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-15T08:31:23,113 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-15T08:31:43,478 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;48e8ad8dc936:35169 227 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 38 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@b5cfd4a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 18 Waited count: 18 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20fcd172 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3408 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 35 Waiting on java.util.concurrent.CountDownLatch$Sync@3569561b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10269 Waited count: 10840 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) 
java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 10 Waited count: 11 Waiting on java.lang.ref.ReferenceQueue$Lock@26018be4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@6030b917 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 676 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@36683139-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:44195}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 27 Waited count: 2900 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1eb60d5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44023): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 33275 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1462 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ed3adea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44023): State: TIMED_WAITING Blocked count: 69 Waited count: 2024 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44023): State: TIMED_WAITING Blocked count: 73 Waited count: 2017 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44023): State: TIMED_WAITING Blocked count: 57 Waited count: 2018 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44023): State: TIMED_WAITING Blocked count: 53 Waited count: 2013 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44023): State: TIMED_WAITING Blocked count: 50 Waited count: 2000 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): 
State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 169 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(2139971415)): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1779204877-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1779204877-88-acceptor-0@b0684ed-ServerConnector@36dae46f{HTTP/1.1, (http/1.1)}{localhost:42601}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1779204877-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1779204877-90): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-7899dc43-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@17db9bff): State: TIMED_WAITING Blocked count: 0 Waited count: 673 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 45925): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 283 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51e6bb1f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 1204 Waited count: 1293 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4fc9de06): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 337 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 339 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 337 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1858979425-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1858979425-122-acceptor-0@4ebd7618-ServerConnector@2bf72d86{HTTP/1.1, (http/1.1)}{localhost:36651}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Client (1655466367) connection to localhost/127.0.0.1:44023 from jenkins): State: TIMED_WAITING Blocked count: 1211 Waited count: 1212 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (qtp1858979425-123): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1858979425-124): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-666943b4-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 (IPC Parameter Sending Thread for localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 0 Waited count: 1814 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@45066529): State: TIMED_WAITING Blocked count: 0 Waited count: 673 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35191): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked 
count: 0 Waited count: 68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 244 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58612ac3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 1221 Waited count: 1296 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@22fa74d2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 337 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 337 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 337 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp586058363-157): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp586058363-158-acceptor-0@1c0554c2-ServerConnector@127441c8{HTTP/1.1, (http/1.1)}{localhost:38923}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp586058363-159): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp586058363-160): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-7e229a1f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data1)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data2)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 166 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data4)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 173 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data2/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data3/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data4/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 179 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@8153a2): State: TIMED_WAITING Blocked count: 0 Waited count: 672 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 182 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 180 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data1/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 183 (IPC Server idle connection scanner for port 46235): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 188 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Command processor): State: WAITING Blocked count: 0 Waited count: 281 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17016de8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 201 (BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 1162 Waited count: 1293 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (pool-44-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (java.util.concurrent.ThreadPoolExecutor$Worker@12b52dca[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@4c082299[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5c063c65): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 181 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 209 (IPC Server handler 0 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 336 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 336 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 336 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 339 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 336 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data5/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data6/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@4cfeb252[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:59481): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 169 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 12 Waited count: 343 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f108a23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:59481):): State: WAITING Blocked count: 1 Waited count: 437 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1876a769 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 467 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@be60a25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 86 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7e9f2511 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 232 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 28 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:59481)): State: RUNNABLE Blocked count: 38 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 4 Waited count: 54 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@68f3072c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 1 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 19 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@62ecfd40 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 83 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169): State: WAITING Blocked count: 57 Waited count: 269 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@655cfba1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169): State: WAITING Blocked count: 1 Waited count: 23 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a4ee2f6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169): State: WAITING Blocked count: 162 Waited count: 571 Waiting on java.util.concurrent.Semaphore$NonfairSync@3a7446f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35169): State: WAITING Blocked count: 61 Waited count: 6454 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ff7aa07 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35169): State: WAITING Blocked count: 0 Waited count: 4 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d74dfab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d74dfab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@10d3a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@184b76b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1a6e402d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@17c5d4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7770b19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 101 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;48e8ad8dc936:35169): State: TIMED_WAITING Blocked count: 12 Waited count: 2743 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1107/0x00007f2888f6aca8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/48e8ad8dc936:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/48e8ad8dc936:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@178e8ee4): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3326 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 93 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 61 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 128 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 
(SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 33204 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 39 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 37 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 459 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2281c952 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/48e8ad8dc936:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ae7a9d4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 (regionserver/48e8ad8dc936:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1a806c7e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/48e8ad8dc936:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@207d4d5c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 509 (LeaseRenewer:jenkins.hfs.0@localhost:44023): State: TIMED_WAITING Blocked count: 9 Waited count: 346 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 512 (LeaseRenewer:jenkins.hfs.2@localhost:44023): State: TIMED_WAITING Blocked count: 10 Waited count: 347 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 515 (LeaseRenewer:jenkins.hfs.1@localhost:44023): State: TIMED_WAITING Blocked count: 9 Waited count: 346 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 519 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (region-location-0): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 33067 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 542 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 546 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 548 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 562 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 482 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 563 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 350 Waiting on java.util.concurrent.ForkJoinPool@1b247a16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 587 (region-location-1): State: WAITING Blocked count: 6 Waited count: 10 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 588 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 990 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 388 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1053 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1096 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4803ac2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 
(RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1142 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1143 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1462 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@12996e79 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) 
java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1626 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 789 Waiting on java.util.concurrent.ForkJoinPool@1b247a16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2914 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2915 (region-location-4): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4499 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4500 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4501 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8061 (AsyncFSWAL-1-hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData-prefix:48e8ad8dc936,35169,1731659174197): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ad552f6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8066 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-11-15T08:32:13,479 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-15T08:32:43,479 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
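[editor's note] The two DEBUG records above come from HBaseTestingUtil's FsDatasetAsyncDiskServiceFixer failing a reflective lookup of a field named "threadGroup" (see HBASE-27595); per the message, the field is gone on newer Hadoop versions. Below is a minimal, self-contained sketch of that failure mode only; the class ReflectiveFieldProbe and the use of Thread.class as the lookup target are illustrative assumptions, not the actual fixer code.

import java.lang.reflect.Field;

// Illustrative sketch (not the HBase fixer): a reflective field lookup that
// fails with NoSuchFieldException when the named field does not exist in the
// runtime class, which is what the DEBUG records above report.
public class ReflectiveFieldProbe {
  public static void main(String[] args) {
    try {
      // Thread.class is only a stand-in target (an assumption); in JDK 17 it has
      // no field called "threadGroup", so this lookup throws, mirroring the log
      // message. The real fixer evidently reflects into a Hadoop-internal class.
      Field f = Thread.class.getDeclaredField("threadGroup");
      System.out.println("found field: " + f);
    } catch (NoSuchFieldException e) {
      // Evidently non-fatal for the test: the fixer logs at DEBUG and moves on,
      // which is why the same record repeats every 30 seconds above.
      System.out.println("NoSuchFieldException: " + e.getMessage());
    }
  }
}

The "Process Thread Dump: Automatic Stack Trace every 60 seconds" blocks that follow are emitted while the test waits for the master (M:0;48e8ad8dc936:35169) to shut down; the Time-limited test stack later in the dump shows the path through Threads.threadDumpingIsAlive and ReflectionUtils.printThreadInfo. A hedged sketch of that general watchdog pattern, using the standard ThreadMXBean API rather than the HBase utilities (PeriodicThreadDumper and dumpWhileAlive are illustrative names):

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

// Hedged sketch of a periodic thread-dump watchdog; the same general shape as
// the log's automatic dumps, not the HBase implementation.
public class PeriodicThreadDumper {
  public static void dumpWhileAlive(Thread watched, long intervalMillis)
      throws InterruptedException {
    ThreadMXBean mx = ManagementFactory.getThreadMXBean();
    while (watched.isAlive()) {
      // Wait up to one interval for the watched thread (here, the master) to exit.
      watched.join(intervalMillis);
      if (!watched.isAlive()) {
        break;
      }
      // Still alive: emit each thread's state and (truncated) stack, roughly the
      // information that appears in the dump entries in this log.
      for (ThreadInfo info : mx.dumpAllThreads(false, false)) {
        System.out.print(info);
      }
    }
  }
}

The per-thread "Blocked count" and "Waited count" fields in the dump entries presumably correspond to ThreadInfo.getBlockedCount() and ThreadInfo.getWaitedCount() from the same ThreadMXBean data. [end editor's note]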
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;48e8ad8dc936:35169 223 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 38 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@b5cfd4a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 18 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 20 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20fcd172 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4008 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.CountDownLatch$Sync@dfaae65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10269 Waited count: 10841 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 10 Waited count: 11 Waiting on java.lang.ref.ReferenceQueue$Lock@26018be4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@6030b917 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 796 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@36683139-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:44195}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 27 Waited count: 2900 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1eb60d5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44023): State: TIMED_WAITING Blocked count: 1 Waited 
count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 134 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 133 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 39240 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1462 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ed3adea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44023): State: TIMED_WAITING Blocked count: 69 Waited count: 2085 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44023): State: TIMED_WAITING Blocked count: 73 Waited count: 2078 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44023): State: TIMED_WAITING Blocked count: 57 Waited count: 2079 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44023): State: TIMED_WAITING Blocked count: 53 Waited count: 2074 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44023): State: TIMED_WAITING Blocked count: 50 Waited count: 2062 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 199 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(2139971415)): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1779204877-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1779204877-88-acceptor-0@b0684ed-ServerConnector@36dae46f{HTTP/1.1, (http/1.1)}{localhost:42601}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1779204877-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1779204877-90): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-7899dc43-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@17db9bff): State: TIMED_WAITING Blocked count: 0 Waited count: 793 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 45925): State: TIMED_WAITING Blocked count: 1 Waited count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 303 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51e6bb1f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 1224 Waited count: 1333 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4fc9de06): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 397 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 398 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 399 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 398 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 397 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1858979425-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1858979425-122-acceptor-0@4ebd7618-ServerConnector@2bf72d86{HTTP/1.1, (http/1.1)}{localhost:36651}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Client (1655466367) connection to localhost/127.0.0.1:44023 from jenkins): 
State: TIMED_WAITING Blocked count: 1271 Waited count: 1272 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (qtp1858979425-123): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1858979425-124): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-666943b4-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 (IPC Parameter Sending Thread for localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 0 Waited count: 1874 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@45066529): State: TIMED_WAITING Blocked count: 0 Waited count: 793 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35191): State: TIMED_WAITING Blocked count: 1 Waited count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 264 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58612ac3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 1241 Waited count: 1336 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@22fa74d2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 397 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 402 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 400 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 397 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 397 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp586058363-157): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp586058363-158-acceptor-0@1c0554c2-ServerConnector@127441c8{HTTP/1.1, (http/1.1)}{localhost:38923}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp586058363-159): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp586058363-160): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-7e229a1f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data1)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data2)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 166 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data4)): State: TIMED_WAITING 
Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 173 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data2/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data3/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data4/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 179 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@8153a2): State: TIMED_WAITING Blocked count: 0 Waited count: 792 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 182 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) 
app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 180 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data1/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 183 (IPC Server idle connection scanner for port 46235): State: TIMED_WAITING Blocked count: 1 Waited count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 188 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Command processor): State: WAITING Blocked count: 0 Waited count: 301 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17016de8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 201 (BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 1182 Waited count: 1333 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (pool-44-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (java.util.concurrent.ThreadPoolExecutor$Worker@12b52dca[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@4c082299[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5c063c65): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 181 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 209 (IPC Server handler 0 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 396 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 396 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 396 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 399 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 396 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data5/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data6/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@4cfeb252[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:59481): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 199 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 12 Waited count: 348 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f108a23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 
cport:59481):): State: WAITING Blocked count: 1 Waited count: 442 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1876a769 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 472 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@be60a25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7e9f2511 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 260 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 28 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:59481)): State: RUNNABLE Blocked count: 38 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 4 Waited count: 54 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@68f3072c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 1 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 19 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@62ecfd40 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169): State: WAITING Blocked count: 57 Waited count: 269 Waiting on java.util.concurrent.Semaphore$NonfairSync@655cfba1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169): State: WAITING Blocked count: 1 Waited count: 23 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a4ee2f6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169): State: WAITING Blocked count: 162 Waited count: 571 Waiting on java.util.concurrent.Semaphore$NonfairSync@3a7446f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35169): State: WAITING Blocked count: 61 Waited count: 6454 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ff7aa07 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35169): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d74dfab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d74dfab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@10d3a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@184b76b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1a6e402d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@17c5d4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7770b19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 101 
Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;48e8ad8dc936:35169): State: TIMED_WAITING Blocked count: 12 Waited count: 2743 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1107/0x00007f2888f6aca8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/48e8ad8dc936:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/48e8ad8dc936:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@178e8ee4): State: TIMED_WAITING Blocked count: 0 Waited count: 132 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3926 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 93 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 61 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 146 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70281fe1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 39206 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 39 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 37 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 459 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2281c952 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/48e8ad8dc936:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ae7a9d4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 (regionserver/48e8ad8dc936:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1a806c7e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/48e8ad8dc936:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@207d4d5c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 519 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (region-location-0): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 39068 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 542 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 546 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 548 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 563 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 350 Waiting on java.util.concurrent.ForkJoinPool@1b247a16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 587 (region-location-1): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 588 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 990 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 394 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1053 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1096 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4803ac2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1142 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1143 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1462 (Container metrics 
unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@12996e79 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1626 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 790 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2914 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2915 (region-location-4): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4499 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4500 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4501 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8061 
(AsyncFSWAL-1-hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData-prefix:48e8ad8dc936,35169,1731659174197): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ad552f6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8066 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-15T08:33:13,479 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T08:33:43,479 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
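The dumps in this log follow the format of HBase's periodic thread-dump helper: the "Time-limited test" thread (Thread 22, in the dump that follows) shows the report being built via org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo and Threads.threadDumpingIsAlive while LocalHBaseCluster.join waits for the master M:0;48e8ad8dc936:35169 to shut down. As a rough illustration only, the plain-Java sketch below shows one way such a "Stack Trace every 60 seconds" report with per-thread State / Blocked count / Waited count / Waiting on fields could be produced from the standard ThreadMXBean API; the class name and exact formatting here are assumptions for illustration, not the actual HBase implementation.

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

// Illustrative sketch only: NOT the HBase code that produced the dump above.
public class PeriodicThreadDumper {
    private static final ThreadMXBean THREAD_MX = ManagementFactory.getThreadMXBean();

    public static void main(String[] args) throws InterruptedException {
        // Emit one full dump per minute, mirroring "Automatic Stack Trace every 60 seconds".
        while (true) {
            dumpAllThreads();
            Thread.sleep(60_000L);
        }
    }

    static void dumpAllThreads() {
        // Integer.MAX_VALUE requests the complete stack of every live thread.
        ThreadInfo[] infos = THREAD_MX.getThreadInfo(THREAD_MX.getAllThreadIds(), Integer.MAX_VALUE);
        // Count is approximate: the array may contain nulls for threads that just exited.
        System.out.println("Process Thread Dump: " + infos.length + " active threads");
        for (ThreadInfo info : infos) {
            if (info == null) {
                continue; // thread exited between getAllThreadIds() and getThreadInfo()
            }
            System.out.println("Thread " + info.getThreadId() + " (" + info.getThreadName() + "):");
            System.out.println("  State: " + info.getThreadState());
            System.out.println("  Blocked count: " + info.getBlockedCount());
            System.out.println("  Waited count: " + info.getWaitedCount());
            if (info.getLockName() != null) {
                System.out.println("  Waiting on " + info.getLockName());
            }
            System.out.println("  Stack:");
            for (StackTraceElement frame : info.getStackTrace()) {
                System.out.println("    " + frame);
            }
        }
    }
}

Read with that in mind, most of the WAITING threads above (RPC handlers parked in Semaphore.acquire, NIOWorkerThread and pool workers parked in LinkedBlockingQueue.take) appear to be idle pools; the thread the 60-second dumps are waiting on, Thread 287 (M:0;48e8ad8dc936:35169), is TIMED_WAITING in SyncFuture.get during a WAL sync inside HRegion.doClose, which suggests the master shutdown is stalled on that WAL sync.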
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;48e8ad8dc936:35169 222 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 38 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@b5cfd4a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 18 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20fcd172 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4608 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 47 Waiting on java.util.concurrent.CountDownLatch$Sync@54c9e305 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10269 Waited count: 10842 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 10 Waited count: 11 Waiting on java.lang.ref.ReferenceQueue$Lock@26018be4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@6030b917 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 916 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@36683139-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:44195}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 27 Waited count: 2900 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1eb60d5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44023): State: TIMED_WAITING Blocked count: 1 Waited 
count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 154 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 153 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 45204 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1462 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ed3adea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44023): State: TIMED_WAITING Blocked count: 69 Waited count: 2146 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44023): State: TIMED_WAITING Blocked count: 73 Waited count: 2139 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44023): State: TIMED_WAITING Blocked count: 57 Waited count: 2140 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44023): State: TIMED_WAITING Blocked count: 53 Waited count: 2134 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44023): State: TIMED_WAITING Blocked count: 50 Waited count: 2122 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 229 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(2139971415)): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1779204877-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1779204877-88-acceptor-0@b0684ed-ServerConnector@36dae46f{HTTP/1.1, (http/1.1)}{localhost:42601}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1779204877-89): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1779204877-90): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-7899dc43-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@17db9bff): State: TIMED_WAITING Blocked count: 0 Waited count: 913 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 45925): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 323 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51e6bb1f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 1244 Waited count: 1373 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4fc9de06): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 457 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 458 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 459 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 458 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 457 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1858979425-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1858979425-122-acceptor-0@4ebd7618-ServerConnector@2bf72d86{HTTP/1.1, (http/1.1)}{localhost:36651}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Client (1655466367) connection to localhost/127.0.0.1:44023 from jenkins): 
State: TIMED_WAITING Blocked count: 1331 Waited count: 1332 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (qtp1858979425-123): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1858979425-124): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-666943b4-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 (IPC Parameter Sending Thread for localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 0 Waited count: 1934 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@45066529): State: TIMED_WAITING Blocked count: 0 Waited count: 913 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35191): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 284 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58612ac3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 1261 Waited count: 1376 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@22fa74d2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 457 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 463 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 460 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 457 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 457 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp586058363-157): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp586058363-158-acceptor-0@1c0554c2-ServerConnector@127441c8{HTTP/1.1, (http/1.1)}{localhost:38923}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp586058363-159): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp586058363-160): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-7e229a1f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data1)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data2)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 166 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data4)): State: TIMED_WAITING 
Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 173 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data2/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data3/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data4/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 179 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@8153a2): State: TIMED_WAITING Blocked count: 0 Waited count: 912 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 182 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) 
app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 180 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data1/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 183 (IPC Server idle connection scanner for port 46235): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 188 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Command processor): State: WAITING Blocked count: 0 Waited count: 321 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17016de8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 201 (BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 1202 Waited count: 1373 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (pool-44-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (java.util.concurrent.ThreadPoolExecutor$Worker@12b52dca[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@4c082299[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5c063c65): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 181 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 209 (IPC Server handler 0 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 456 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 456 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 456 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 459 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 456 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data5/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data6/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@4cfeb252[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: 
TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:59481): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 229 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 12 Waited count: 352 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f108a23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 
cport:59481):): State: WAITING Blocked count: 1 Waited count: 446 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1876a769 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 476 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@be60a25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7e9f2511 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) 
app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 288 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 28 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:59481)): State: RUNNABLE Blocked count: 38 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 4 Waited count: 54 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@68f3072c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 1 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 19 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@62ecfd40 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 84 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169): State: WAITING Blocked count: 57 Waited count: 269 Waiting on java.util.concurrent.Semaphore$NonfairSync@655cfba1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169): State: WAITING Blocked count: 1 Waited count: 23 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a4ee2f6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 
(RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169): State: WAITING Blocked count: 162 Waited count: 571 Waiting on java.util.concurrent.Semaphore$NonfairSync@3a7446f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35169): State: WAITING Blocked count: 61 Waited count: 6454 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ff7aa07 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35169): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d74dfab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d74dfab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@10d3a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@184b76b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1a6e402d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@17c5d4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7770b19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 101 
Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;48e8ad8dc936:35169): State: TIMED_WAITING Blocked count: 12 Waited count: 2743 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1107/0x00007f2888f6aca8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/48e8ad8dc936:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/48e8ad8dc936:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@178e8ee4): State: TIMED_WAITING Blocked count: 0 Waited count: 152 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4526 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 93 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 61 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 146 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70281fe1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 45208 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 39 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 37 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 459 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2281c952 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/48e8ad8dc936:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ae7a9d4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 (regionserver/48e8ad8dc936:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1a806c7e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/48e8ad8dc936:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@207d4d5c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 519 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (region-location-0): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 45070 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 542 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 546 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 548 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 563 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 351 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 587 (region-location-1): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 588 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 990 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 400 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1053 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1096 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4803ac2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1142 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1143 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1462 (Container metrics 
unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@12996e79 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2914 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2915 (region-location-4): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4499 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4500 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4501 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8061 (AsyncFSWAL-1-hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData-prefix:48e8ad8dc936,35169,1731659174197): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ad552f6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8066 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-11-15T08:34:13,479 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-15T08:34:43,480 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;48e8ad8dc936:35169 221 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) 
java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 38 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@b5cfd4a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 18 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 26 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20fcd172 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5208 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 53 Waiting on java.util.concurrent.CountDownLatch$Sync@548b5988 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10269 Waited count: 10843 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) 
app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 10 Waited count: 11 Waiting on java.lang.ref.ReferenceQueue$Lock@26018be4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@6030b917 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 1036 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@36683139-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:44195}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 
(FSEditLogAsync): State: WAITING Blocked count: 27 Waited count: 2900 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1eb60d5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44023): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 174 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 173 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 51169 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1462 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ed3adea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44023): State: TIMED_WAITING Blocked count: 69 Waited count: 2207 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44023): State: TIMED_WAITING Blocked count: 73 Waited count: 2200 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44023): State: TIMED_WAITING Blocked count: 57 Waited count: 2201 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44023): State: TIMED_WAITING Blocked count: 53 Waited count: 2196 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44023): State: TIMED_WAITING Blocked count: 50 Waited count: 2184 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 259 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(2139971415)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1779204877-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1779204877-88-acceptor-0@b0684ed-ServerConnector@36dae46f{HTTP/1.1, (http/1.1)}{localhost:42601}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1779204877-89): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1779204877-90): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-7899dc43-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@17db9bff): State: TIMED_WAITING Blocked count: 1 Waited count: 1033 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 45925): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 343 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51e6bb1f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 1264 Waited count: 1413 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4fc9de06): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 517 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 518 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 519 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 518 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 517 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1858979425-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) 
app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1858979425-122-acceptor-0@4ebd7618-ServerConnector@2bf72d86{HTTP/1.1, (http/1.1)}{localhost:36651}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Client (1655466367) connection to localhost/127.0.0.1:44023 from jenkins): State: TIMED_WAITING Blocked count: 1391 Waited count: 1392 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (qtp1858979425-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1858979425-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 125 (Session-HouseKeeper-666943b4-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 (IPC Parameter Sending Thread for localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 0 Waited count: 1994 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@45066529): State: TIMED_WAITING Blocked count: 0 Waited count: 1033 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35191): State: TIMED_WAITING Blocked count: 1 Waited count: 
53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 304 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58612ac3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 1281 Waited count: 1416 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@22fa74d2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 517 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 523 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 520 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 517 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 517 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp586058363-157): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp586058363-158-acceptor-0@1c0554c2-ServerConnector@127441c8{HTTP/1.1, (http/1.1)}{localhost:38923}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp586058363-159): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp586058363-160): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-7e229a1f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data1)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data2)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 166 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data4)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 173 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data2/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data3/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data4/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 179 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@8153a2): State: TIMED_WAITING Blocked count: 0 Waited count: 1032 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 182 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 180 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data1/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 183 (IPC Server idle connection scanner for port 46235): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 188 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Command processor): State: WAITING Blocked count: 0 Waited count: 341 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17016de8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 201 (BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 1222 Waited count: 1413 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (pool-44-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (java.util.concurrent.ThreadPoolExecutor$Worker@12b52dca[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@4c082299[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5c063c65): State: RUNNABLE Blocked 
count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 181 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 209 (IPC Server handler 0 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 516 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 516 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 516 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 46235): 
State: TIMED_WAITING Blocked count: 0 Waited count: 519 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 516 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data5/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data6/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@4cfeb252[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:59481): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 259 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 12 Waited count: 357 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f108a23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:59481):): State: WAITING Blocked count: 1 Waited count: 451 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1876a769 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 481 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@be60a25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7e9f2511 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 316 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 28 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:59481)): State: RUNNABLE Blocked count: 38 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 4 Waited count: 54 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@68f3072c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 1 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 1 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 19 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@62ecfd40 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 86 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 
(RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169): State: WAITING Blocked count: 57 Waited count: 269 Waiting on java.util.concurrent.Semaphore$NonfairSync@655cfba1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169): State: WAITING Blocked count: 1 Waited count: 23 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a4ee2f6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169): State: WAITING Blocked count: 162 Waited count: 571 Waiting on java.util.concurrent.Semaphore$NonfairSync@3a7446f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35169): State: WAITING Blocked count: 61 Waited count: 6454 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ff7aa07 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 
(RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35169): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d74dfab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d74dfab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@10d3a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@184b76b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1a6e402d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@17c5d4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7770b19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 101 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;48e8ad8dc936:35169): State: TIMED_WAITING Blocked count: 12 Waited count: 2743 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1107/0x00007f2888f6aca8.run(Unknown Source) 
app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/48e8ad8dc936:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/48e8ad8dc936:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@178e8ee4): State: TIMED_WAITING Blocked count: 0 Waited count: 172 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5125 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 93 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 61 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 146 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70281fe1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51209 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 39 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 37 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 459 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2281c952 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/48e8ad8dc936:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ae7a9d4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 (regionserver/48e8ad8dc936:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1a806c7e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/48e8ad8dc936:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@207d4d5c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 519 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (region-location-0): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51071 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 542 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 546 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 548 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 587 (region-location-1): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 588 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 990 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 406 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1053 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1096 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4803ac2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1142 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1143 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1462 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@12996e79 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2914 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2915 (region-location-4): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4499 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4500 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4501 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8061 (AsyncFSWAL-1-hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData-prefix:48e8ad8dc936,35169,1731659174197): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ad552f6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8066 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-15T08:35:13,480 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-15T08:35:43,480 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
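The two FsDatasetAsyncDiskServiceFixer DEBUG lines above report a NoSuchFieldException ("threadGroup") raised while reflecting into Hadoop's FsDatasetAsyncDiskService, and the message itself hedges the cause (Hadoop newer than 3.2.3/3.3.4, see HBASE-27595). The sketch below is a minimal, self-contained illustration of that failure mode only, not the actual HBaseTestingUtil code: the class and field names FakeAsyncDiskService and worker are invented stand-ins, and only the general pattern (getDeclaredField throwing when a field has been removed in a newer dependency version) is taken from the log.

    // Hedged illustration only -- not the HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer code.
    // Shows how a reflective field lookup surfaces as NoSuchFieldException when the target
    // class on the classpath no longer declares the field being probed.
    import java.lang.reflect.Field;

    public class ReflectiveFieldProbe {

        // Stand-in for the Hadoop class being probed; the real target and field name differ.
        static class FakeAsyncDiskService {
            @SuppressWarnings("unused")
            private Thread worker; // present in this "old" version of the class
        }

        /** Returns the named declared field, or null when this class version no longer has it. */
        static Field findField(Class<?> clazz, String name) {
            try {
                Field f = clazz.getDeclaredField(name); // throws NoSuchFieldException if absent
                f.setAccessible(true);
                return f;
            } catch (NoSuchFieldException e) {
                // The condition the DEBUG message above hedges about: the field was removed
                // or renamed in a newer release of the dependency (cf. HBASE-27595).
                System.out.println("NoSuchFieldException: " + name
                    + " (field removed or renamed in this version)");
                return null;
            }
        }

        public static void main(String[] args) {
            System.out.println(findField(FakeAsyncDiskService.class, "worker"));      // found
            System.out.println(findField(FakeAsyncDiskService.class, "threadGroup")); // absent
        }
    }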
2024-11-15T08:35:48,817 DEBUG [M:0;48e8ad8dc936:35169 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731659448795Disabling compacts and flushes for region at 1731659448795Disabling writes for close at 1731659448815 (+20 ms)Obtaining lock to block concurrent updates at 1731659448815Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731659448815Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=843626, getHeapSize=1012312, getOffHeapSize=0, getCellsCount=2221 at 1731659448816 (+1 ms)Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1731659748817 (+300001 ms) 2024-11-15T08:35:48,817 WARN [M:0;48e8ad8dc936:35169 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3824, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3824, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] ... 
19 more 2024-11-15T08:35:48,818 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T08:35:48,820 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-15T08:35:48,820 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-15T08:35:48,820 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData/WALs/48e8ad8dc936,35169,1731659174197/48e8ad8dc936%2C35169%2C1731659174197.1731659175876 2024-11-15T08:35:48,822 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData/WALs/48e8ad8dc936,35169,1731659174197/48e8ad8dc936%2C35169%2C1731659174197.1731659175876 after 1ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T08:35:48,823 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-15T08:35:48,823 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData/WALs/48e8ad8dc936,35169,1731659174197/48e8ad8dc936%2C35169%2C1731659174197.1731659175876 2024-11-15T08:35:48,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData/WALs/48e8ad8dc936,35169,1731659174197/48e8ad8dc936%2C35169%2C1731659174197.1731659175876 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;48e8ad8dc936:35169 224 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING 
Blocked count: 38 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@b5cfd4a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 18 Waited count: 22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 31 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20fcd172 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5807 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 59 Waiting on java.util.concurrent.CountDownLatch$Sync@65aa38d8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 10269 Waited count: 10844 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) 
java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 10 Waited count: 11 Waiting on java.lang.ref.ReferenceQueue$Lock@26018be4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@6030b917 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@52ed31ff): State: TIMED_WAITING Blocked count: 0 Waited count: 1156 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1843407950-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1843407950-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1843407950-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1843407950-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1843407950-41-acceptor-0@36683139-ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:44195}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1843407950-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 43 (qtp1843407950-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1843407950-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5955aada-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 27 Waited count: 2900 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1eb60d5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 44023): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@5cb1c71c): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 194 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@4524a3c7): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 193 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 57133 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1462 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ed3adea Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC 
Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 44023): State: TIMED_WAITING Blocked count: 69 Waited count: 2268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 44023): State: TIMED_WAITING Blocked count: 73 Waited count: 2261 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 44023): State: TIMED_WAITING Blocked count: 57 Waited count: 2262 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 44023): State: TIMED_WAITING Blocked count: 53 Waited count: 2257 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 44023): State: TIMED_WAITING Blocked count: 50 Waited count: 2245 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@3c4916c2): State: TIMED_WAITING Blocked count: 0 Waited count: 289 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@62d0c398): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2fbefd0a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@6a446ca): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(2139971415)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1779204877-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1779204877-88-acceptor-0@b0684ed-ServerConnector@36dae46f{HTTP/1.1, (http/1.1)}{localhost:42601}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1779204877-89): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 
90 (qtp1779204877-90): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-7899dc43-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@17db9bff): State: TIMED_WAITING Blocked count: 1 Waited count: 1153 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection 
scanner for port 45925): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 363 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@51e6bb1f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 1284 Waited count: 1453 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4fc9de06): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 577 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 579 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 45925): State: TIMED_WAITING Blocked count: 0 Waited count: 577 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1858979425-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp1858979425-122-acceptor-0@4ebd7618-ServerConnector@2bf72d86{HTTP/1.1, (http/1.1)}{localhost:36651}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Client (1655466367) connection to localhost/127.0.0.1:44023 from jenkins): State: TIMED_WAITING Blocked count: 1451 Waited count: 1452 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 123 (qtp1858979425-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1858979425-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-666943b4-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 116 (IPC Parameter Sending Thread for localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 0 Waited count: 2054 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@45066529): State: TIMED_WAITING Blocked count: 0 Waited count: 1153 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 35191): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked 
count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 0 Waited count: 324 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58612ac3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 1301 Waited count: 1456 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@22fa74d2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 577 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 589 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 580 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 577 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 35191): State: TIMED_WAITING Blocked count: 0 Waited count: 577 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp586058363-157): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f288842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp586058363-158-acceptor-0@1c0554c2-ServerConnector@127441c8{HTTP/1.1, (http/1.1)}{localhost:38923}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp586058363-159): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp586058363-160): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-7e229a1f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data1)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data2)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 166 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data4)): State: TIMED_WAITING Blocked count: 10 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 173 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data2/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data3/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 178 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data4/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 179 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@8153a2): State: TIMED_WAITING Blocked count: 0 Waited count: 1152 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 182 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 180 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data1/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 183 (IPC Server idle connection scanner for port 46235): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 188 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 196 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (Command processor): State: WAITING Blocked count: 0 Waited count: 361 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17016de8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 201 (BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023): State: TIMED_WAITING Blocked count: 1242 Waited count: 1453 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (pool-44-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (java.util.concurrent.ThreadPoolExecutor$Worker@12b52dca[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@4c082299[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5c063c65): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 181 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 209 (IPC Server handler 0 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 576 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 210 (IPC Server handler 1 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 576 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 2 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 576 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 579 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 46235): State: TIMED_WAITING Blocked count: 0 Waited count: 576 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 216 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 221 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data5/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data6/current/BP-1366914351-172.17.0.2-1731659170365): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 227 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (java.util.concurrent.ThreadPoolExecutor$Worker@4cfeb252[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 238 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:59481): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 236 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 240 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 289 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 241 (SyncThread:0): State: WAITING Blocked count: 12 Waited count: 361 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5f108a23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 242 (ProcessThread(sid:0 cport:59481):): State: WAITING Blocked count: 1 Waited count: 455 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1876a769 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 243 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 485 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@be60a25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 244 (NIOWorkerThread-1): State: WAITING Blocked count: 3 Waited count: 89 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@7e9f2511 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 344 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 28 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (Time-limited test-SendThread(127.0.0.1:59481)): State: RUNNABLE Blocked count: 38 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 259 (Time-limited test-EventThread): State: WAITING Blocked count: 4 Waited count: 54 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@68f3072c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 260 (NIOWorkerThread-2): State: WAITING Blocked count: 2 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-3): State: WAITING Blocked count: 1 Waited count: 89 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-4): State: WAITING Blocked count: 1 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (zk-event-processor-pool-0): State: WAITING Blocked count: 19 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@62ecfd40 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-5): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-6): State: WAITING Blocked count: 4 Waited count: 88 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-8): State: WAITING Blocked count: 0 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-9): State: WAITING Blocked count: 0 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-10): State: WAITING Blocked count: 1 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-11): State: WAITING Blocked count: 5 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-12): State: WAITING Blocked count: 1 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-13): State: WAITING Blocked count: 4 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-14): State: WAITING Blocked count: 2 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-16): State: WAITING Blocked count: 2 Waited count: 86 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2585e210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35169): State: WAITING Blocked count: 57 Waited count: 269 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@655cfba1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 278 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35169): State: WAITING Blocked count: 1 Waited count: 23 Waiting on java.util.concurrent.Semaphore$NonfairSync@4a4ee2f6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35169): State: WAITING Blocked count: 162 Waited count: 571 Waiting on java.util.concurrent.Semaphore$NonfairSync@3a7446f4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35169): State: WAITING Blocked count: 61 Waited count: 6454 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3ff7aa07 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35169): State: WAITING Blocked count: 0 Waited count: 4 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d74dfab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3d74dfab Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@10d3a1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@184b76b1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1a6e402d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35169): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@17c5d4c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7770b19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 290 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 101 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 287 (M:0;48e8ad8dc936:35169): State: TIMED_WAITING Blocked count: 12 Waited count: 2744 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1357/0x00007f28891b8220.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) 
app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/48e8ad8dc936:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/48e8ad8dc936:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@178e8ee4): State: TIMED_WAITING Blocked count: 0 Waited count: 192 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 382 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5725 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 399 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 93 Waited count: 4 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 400 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 61 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 414 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 146 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@70281fe1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 413 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57210 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 434 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 39 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 435 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 37 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 459 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2281c952 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 482 (regionserver/48e8ad8dc936:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ae7a9d4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 484 (regionserver/48e8ad8dc936:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1a806c7e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 483 (regionserver/48e8ad8dc936:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@207d4d5c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 519 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 529 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 530 (region-location-0): State: WAITING Blocked count: 7 Waited count: 12 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 412 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57072 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 542 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 546 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 548 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 12 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 587 (region-location-1): State: WAITING Blocked count: 6 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 588 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 990 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 
412 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1053 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1083 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1096 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4803ac2a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1141 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1142 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1143 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1462 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@12996e79 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2914 (region-location-3): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2915 (region-location-4): State: WAITING Blocked count: 1 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@c9acb58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4499 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4500 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 4501 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8061 
(AsyncFSWAL-1-hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData-prefix:48e8ad8dc936,35169,1731659174197): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2ad552f6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8066 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 8067 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8071 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8072 (Close-WAL-Writer-0): State: 
TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1340/0x00007f28891b0228.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-11-15T08:35:52,823 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData/WALs/48e8ad8dc936,35169,1731659174197/48e8ad8dc936%2C35169%2C1731659174197.1731659175876 after 4000ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
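[Editorial note] The "Filesystem closed" warning above is raised because the WAL close path is still trying to recover the lease on the WAL file after the test's DFS client has already been shut down. For orientation only, here is a simplified Java sketch of the recover-and-poll pattern involved; it is not the actual RecoverLeaseFSUtils code (the real utility adds timeouts, backoff and a reflective isFileClosed check), and the namenode port comes from this log while the path is a placeholder.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder WAL path; only the namenode address (localhost:44023) is taken from the log above.
        Path wal = new Path("hdfs://localhost:44023/placeholder/wal-file");
        FileSystem fs = FileSystem.get(wal.toUri(), conf);
        if (fs instanceof DistributedFileSystem) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          boolean recovered = false;
          // recoverLease() returns true once the NameNode has released the lease.
          // If the FileSystem was closed first, this call is where the
          // "java.io.IOException: Filesystem closed" above comes from.
          for (int attempt = 0; attempt < 5 && !recovered; attempt++) {
            recovered = dfs.recoverLease(wal);
            if (!recovered) {
              Thread.sleep(1000L); // fixed pause instead of the utility's backoff
            }
          }
          System.out.println("lease recovered: " + recovered);
        }
      }
    }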
2024-11-15T08:35:53,819 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds"
2024-11-15T08:35:53,819 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-15T08:35:53,819 INFO [M:0;48e8ad8dc936:35169 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-15T08:35:53,819 INFO [M:0;48e8ad8dc936:35169 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35169
2024-11-15T08:35:53,819 INFO [M:0;48e8ad8dc936:35169 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-15T08:35:53,825 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:44023/user/jenkins/test-data/6f120f80-5512-9e95-5e42-cf1caa2aeca1/MasterData/WALs/48e8ad8dc936,35169,1731659174197/48e8ad8dc936%2C35169%2C1731659174197.1731659175876
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 12 more
2024-11-15T08:35:53,921 INFO [M:0;48e8ad8dc936:35169 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-15T08:35:53,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-15T08:35:53,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35169-0x1011abbf93d0000, quorum=127.0.0.1:59481, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-15T08:35:53,925 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3d3bb473{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-15T08:35:53,925 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@127441c8{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-15T08:35:53,925 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-15T08:35:53,926 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@544a54bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-15T08:35:53,926 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e41eadb{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.log.dir/,STOPPED}
2024-11-15T08:35:53,927 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
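[Editorial note] The ERROR from wal.AbstractFSWAL a few records above names the knob it suggests for slow filesystems, "hbase.wal.async.wait.on.shutdown.seconds". A minimal sketch of raising it programmatically before a test cluster starts is shown below; the value 15 is an arbitrary illustration, not a recommendation drawn from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalShutdownWaitExample {
      public static void main(String[] args) {
        // Start from the usual HBase configuration and override the shutdown
        // wait named in the ERROR message; 15 seconds is an example value.
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 15);
        System.out.println(conf.getInt("hbase.wal.async.wait.on.shutdown.seconds", -1));
      }
    }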
2024-11-15T08:35:53,927 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-15T08:35:53,927 WARN [BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-15T08:35:53,927 WARN [BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1366914351-172.17.0.2-1731659170365 (Datanode Uuid 759e3899-d572-409a-8cbc-d6e36be1dc36) service to localhost/127.0.0.1:44023
2024-11-15T08:35:53,929 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data5/current/BP-1366914351-172.17.0.2-1731659170365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-15T08:35:53,929 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data6/current/BP-1366914351-172.17.0.2-1731659170365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-15T08:35:53,930 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-15T08:35:53,933 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@19299b82{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-15T08:35:53,934 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2bf72d86{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-15T08:35:53,934 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-15T08:35:53,934 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@246a58e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-15T08:35:53,934 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ed86ab3{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.log.dir/,STOPPED}
2024-11-15T08:35:53,935 WARN [BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-15T08:35:53,935 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-15T08:35:53,935 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-15T08:35:53,935 WARN [BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1366914351-172.17.0.2-1731659170365 (Datanode Uuid 0a1a9b44-863a-4e7a-8b1e-cbf3e3019079) service to localhost/127.0.0.1:44023
2024-11-15T08:35:53,936 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data3/current/BP-1366914351-172.17.0.2-1731659170365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-15T08:35:53,936 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data4/current/BP-1366914351-172.17.0.2-1731659170365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-15T08:35:53,936 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-15T08:35:53,939 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1adf9305{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-15T08:35:53,939 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@36dae46f{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-15T08:35:53,939 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-15T08:35:53,940 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@480cc207{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-15T08:35:53,940 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@134642c1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.log.dir/,STOPPED}
2024-11-15T08:35:53,941 WARN [BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-15T08:35:53,941 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-15T08:35:53,941 WARN [BP-1366914351-172.17.0.2-1731659170365 heartbeating to localhost/127.0.0.1:44023 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1366914351-172.17.0.2-1731659170365 (Datanode Uuid 205d8336-1109-4287-9974-ecd9ff72979d) service to localhost/127.0.0.1:44023
2024-11-15T08:35:53,941 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-15T08:35:53,942 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data1/current/BP-1366914351-172.17.0.2-1731659170365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-15T08:35:53,942 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/cluster_94a03bab-9c10-16f5-a101-e312c1e92673/data/data2/current/BP-1366914351-172.17.0.2-1731659170365 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-15T08:35:53,942 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-15T08:35:53,948 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a55f3e1{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-15T08:35:53,949 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@797ff5a9{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-15T08:35:53,949 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-15T08:35:53,949 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d167fe8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-15T08:35:53,949 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74e30e0c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8fac76a7-6b00-fc11-e8d1-8327f671f35a/hadoop.log.dir/,STOPPED}
2024-11-15T08:35:53,960 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-15T08:35:54,037 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
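[Editorial note] The last records are the normal teardown of the in-JVM test cluster: Jetty handlers and datanode services stop, the MiniZK cluster shuts down, and HBaseTestingUtil reports "Minicluster is down". A minimal sketch of the lifecycle that brackets a log like this one is shown below, assuming HBaseTestingUtil keeps the startMiniCluster()/shutdownMiniCluster() entry points carried over from the older HBaseTestingUtility.

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterLifecycleSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Brings up ZooKeeper, HDFS and HBase master/regionserver in-process;
        // most of the DEBUG/INFO noise in such logs comes from this call.
        util.startMiniCluster();
        try {
          // ... test body would go here ...
        } finally {
          // Produces teardown lines like "Shutdown MiniZK cluster ..." and
          // "Minicluster is down" seen at the end of this log.
          util.shutdownMiniCluster();
        }
      }
    }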