2024-12-16 19:43:47,062 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5fb759d6
2024-12-16 19:43:47,092 main DEBUG Took 0.025662 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-12-16 19:43:47,093 main DEBUG PluginManager 'Core' found 129 plugins
2024-12-16 19:43:47,093 main DEBUG PluginManager 'Level' found 0 plugins
2024-12-16 19:43:47,097 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-12-16 19:43:47,100 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 19:43:47,128 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-12-16 19:43:47,171 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 19:43:47,173 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 19:43:47,175 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 19:43:47,176 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 19:43:47,177 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 19:43:47,177 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 19:43:47,180 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 19:43:47,181 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 19:43:47,194 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 19:43:47,195 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 19:43:47,199 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 19:43:47,200 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 19:43:47,208 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 19:43:47,211 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 19:43:47,212 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 19:43:47,212 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 19:43:47,213 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 19:43:47,213 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 19:43:47,214 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 19:43:47,215 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 19:43:47,215 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 19:43:47,216 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 19:43:47,216 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 19:43:47,217 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-12-16 19:43:47,217 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 19:43:47,218 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-12-16 19:43:47,220 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-12-16 19:43:47,222 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-12-16 19:43:47,248 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-12-16 19:43:47,273 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-12-16 19:43:47,275 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-12-16 19:43:47,276 main DEBUG PluginManager 'Converter' found 47 plugins
2024-12-16 19:43:47,322 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-12-16 19:43:47,342 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-12-16 19:43:47,355 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-12-16 19:43:47,356 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-12-16 19:43:47,356 main DEBUG createAppenders(={Console})
2024-12-16 19:43:47,366 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5fb759d6 initialized
2024-12-16 19:43:47,366 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5fb759d6
2024-12-16 19:43:47,367 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@5fb759d6 OK.
2024-12-16 19:43:47,368 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-12-16 19:43:47,368 main DEBUG OutputStream closed
2024-12-16 19:43:47,369 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-12-16 19:43:47,369 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-12-16 19:43:47,371 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@5b03b9fe OK
2024-12-16 19:43:47,658 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-12-16 19:43:47,661 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-12-16 19:43:47,662 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-12-16 19:43:47,664 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-12-16 19:43:47,665 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-12-16 19:43:47,666 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-12-16 19:43:47,666 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-12-16 19:43:47,666 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-12-16 19:43:47,667 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-12-16 19:43:47,667 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-12-16 19:43:47,668 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-12-16 19:43:47,668 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-12-16 19:43:47,668 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-12-16 19:43:47,669 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-12-16 19:43:47,669 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-12-16 19:43:47,673 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-12-16 19:43:47,674 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-12-16 19:43:47,676 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-12-16 19:43:47,682 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-16 19:43:47,683 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@b2c5e07) with optional ClassLoader: null
2024-12-16 19:43:47,683 main DEBUG Shutdown hook enabled. Registering a new one.
2024-12-16 19:43:47,684 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@b2c5e07] started OK.
2024-12-16T19:43:47,729 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins
2024-12-16 19:43:47,738 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-12-16 19:43:47,739 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-12-16T19:43:48,270 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60
2024-12-16T19:43:48,271 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobExportSnapshot timeout: 13 mins
2024-12-16T19:43:48,271 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobSecureExportSnapshot timeout: 13 mins
2024-12-16T19:43:48,373 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-12-16T19:43:48,815 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-16T19:43:48,821 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4, deleteOnExit=true
2024-12-16T19:43:48,821 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-12-16T19:43:48,823 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/test.cache.data in system properties and HBase conf
2024-12-16T19:43:48,825 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.tmp.dir in system properties and HBase conf
2024-12-16T19:43:48,826 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.log.dir in system properties and HBase conf
2024-12-16T19:43:48,833 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/mapreduce.cluster.local.dir in system properties and HBase conf
2024-12-16T19:43:48,835 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-12-16T19:43:48,836 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-12-16T19:43:49,074 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-12-16T19:43:49,082 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-12-16T19:43:49,083 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-12-16T19:43:49,084 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-12-16T19:43:49,085 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-16T19:43:49,085 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-12-16T19:43:49,086 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-12-16T19:43:49,087 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-12-16T19:43:49,087 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-16T19:43:49,088 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-12-16T19:43:49,089 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/nfs.dump.dir in system properties and HBase conf
2024-12-16T19:43:49,089 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/java.io.tmpdir in system properties and HBase conf
2024-12-16T19:43:49,090 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/dfs.journalnode.edits.dir in system properties and HBase conf
2024-12-16T19:43:49,091 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-12-16T19:43:49,092 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-12-16T19:43:50,539 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-12-16T19:43:50,670 INFO [Time-limited test {}] log.Log(170): Logging initialized @5205ms to org.eclipse.jetty.util.log.Slf4jLog
2024-12-16T19:43:50,779 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T19:43:50,887 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T19:43:50,958 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T19:43:50,958 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T19:43:50,961 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-16T19:43:51,001 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T19:43:51,019 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@744df411{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.log.dir/,AVAILABLE}
2024-12-16T19:43:51,020 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70357eda{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T19:43:51,360 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5da2d515{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/java.io.tmpdir/jetty-localhost-34747-hadoop-hdfs-3_4_1-tests_jar-_-any-10844957723147737641/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-16T19:43:51,379 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69dc1403{HTTP/1.1, (http/1.1)}{localhost:34747}
2024-12-16T19:43:51,379 INFO [Time-limited test {}] server.Server(415): Started @5916ms
2024-12-16T19:43:52,103 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T19:43:52,114 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T19:43:52,122 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T19:43:52,122 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T19:43:52,122 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-12-16T19:43:52,124 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a0dab5a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.log.dir/,AVAILABLE}
2024-12-16T19:43:52,125 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d0a94c7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T19:43:52,267 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@57cef19b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/java.io.tmpdir/jetty-localhost-45121-hadoop-hdfs-3_4_1-tests_jar-_-any-2200643313030648631/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T19:43:52,268 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4b4937c3{HTTP/1.1, (http/1.1)}{localhost:45121}
2024-12-16T19:43:52,268 INFO [Time-limited test {}] server.Server(415): Started @6804ms
2024-12-16T19:43:52,341 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-16T19:43:52,537 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T19:43:52,544 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T19:43:52,574 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T19:43:52,575 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T19:43:52,575 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-16T19:43:52,582 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b9deb82{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.log.dir/,AVAILABLE}
2024-12-16T19:43:52,584 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4b9c8e49{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T19:43:52,728 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b96c006{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/java.io.tmpdir/jetty-localhost-33277-hadoop-hdfs-3_4_1-tests_jar-_-any-15422731553426935355/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T19:43:52,729 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17bf7c8b{HTTP/1.1, (http/1.1)}{localhost:33277}
2024-12-16T19:43:52,730 INFO [Time-limited test {}] server.Server(415): Started @7266ms
2024-12-16T19:43:52,733 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-12-16T19:43:52,794 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-12-16T19:43:52,806 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-12-16T19:43:52,813 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-12-16T19:43:52,813 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-12-16T19:43:52,813 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-12-16T19:43:52,819 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13ce8b71{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.log.dir/,AVAILABLE}
2024-12-16T19:43:52,819 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@622b2501{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-12-16T19:43:52,947 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@52f77f0c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/java.io.tmpdir/jetty-localhost-41355-hadoop-hdfs-3_4_1-tests_jar-_-any-13373621090218761391/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-16T19:43:52,948 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@644aedae{HTTP/1.1, (http/1.1)}{localhost:41355}
2024-12-16T19:43:52,948 INFO [Time-limited test {}] server.Server(415): Started @7484ms
2024-12-16T19:43:52,952 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
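The mini cluster whose DFS, ZooKeeper and HBase daemons come up in the entries above and below is driven by the StartMiniClusterOption printed at 19:43:48,815 (numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1). As a minimal sketch, assuming the HBase 2.x test API (HBaseTestingUtility and StartMiniClusterOption from the hbase-server test jar), a test could bring up an equivalent cluster roughly like this; the class name MiniClusterStartSketch is illustrative only:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Mirror the option values reported in the log entry above.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .createRootDir(false)
        .createWALDir(false)
        .build();
    util.startMiniCluster(option); // starts mini DFS, mini ZooKeeper and the HBase daemons
    try {
      // ... exercise the cluster, e.g. via util.getConnection() ...
    } finally {
      util.shutdownMiniCluster(); // tears down HBase, ZooKeeper and DFS again
    }
  }
}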
2024-12-16T19:43:53,844 WARN [Thread-121 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data2/current/BP-1238680177-172.17.0.2-1734378229927/current, will proceed with Du for space computation calculation,
2024-12-16T19:43:53,844 WARN [Thread-120 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data1/current/BP-1238680177-172.17.0.2-1734378229927/current, will proceed with Du for space computation calculation,
2024-12-16T19:43:53,893 WARN [Thread-128 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data3/current/BP-1238680177-172.17.0.2-1734378229927/current, will proceed with Du for space computation calculation,
2024-12-16T19:43:53,894 WARN [Thread-129 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data4/current/BP-1238680177-172.17.0.2-1734378229927/current, will proceed with Du for space computation calculation,
2024-12-16T19:43:53,904 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-16T19:43:53,932 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-16T19:43:53,976 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcc3c4b72bd52be8c with lease ID 0xf91f7bf8a51e7191: Processing first storage report for DS-4fb865bf-4391-49ae-b31e-3b62fb3795cf from datanode DatanodeRegistration(127.0.0.1:39241, datanodeUuid=5b533e50-8d8a-4498-97b8-acb11831b9a2, infoPort=33371, infoSecurePort=0, ipcPort=44575, storageInfo=lv=-57;cid=testClusterID;nsid=565152765;c=1734378229927)
2024-12-16T19:43:53,977 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc3c4b72bd52be8c with lease ID 0xf91f7bf8a51e7191: from storage DS-4fb865bf-4391-49ae-b31e-3b62fb3795cf node DatanodeRegistration(127.0.0.1:39241, datanodeUuid=5b533e50-8d8a-4498-97b8-acb11831b9a2, infoPort=33371, infoSecurePort=0, ipcPort=44575, storageInfo=lv=-57;cid=testClusterID;nsid=565152765;c=1734378229927), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-12-16T19:43:53,978 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x181c90f5534bf3ee with lease ID 0xf91f7bf8a51e7190: Processing first storage report for DS-a5be4d6e-06b0-4d3e-af01-bc15c82cbee1 from datanode DatanodeRegistration(127.0.0.1:43893, datanodeUuid=2888fb3c-2097-4f16-bd71-ae02aa7cea00, infoPort=45671, infoSecurePort=0, ipcPort=36193, storageInfo=lv=-57;cid=testClusterID;nsid=565152765;c=1734378229927)
2024-12-16T19:43:53,978 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x181c90f5534bf3ee with lease ID 0xf91f7bf8a51e7190: from storage DS-a5be4d6e-06b0-4d3e-af01-bc15c82cbee1 node DatanodeRegistration(127.0.0.1:43893, datanodeUuid=2888fb3c-2097-4f16-bd71-ae02aa7cea00, infoPort=45671, infoSecurePort=0, ipcPort=36193, storageInfo=lv=-57;cid=testClusterID;nsid=565152765;c=1734378229927), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T19:43:53,979 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcc3c4b72bd52be8c with lease ID 0xf91f7bf8a51e7191: Processing first storage report for DS-d7b2d3c8-8285-4fc8-b690-31340d51509d from datanode DatanodeRegistration(127.0.0.1:39241, datanodeUuid=5b533e50-8d8a-4498-97b8-acb11831b9a2, infoPort=33371, infoSecurePort=0, ipcPort=44575, storageInfo=lv=-57;cid=testClusterID;nsid=565152765;c=1734378229927)
2024-12-16T19:43:53,979 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcc3c4b72bd52be8c with lease ID 0xf91f7bf8a51e7191: from storage DS-d7b2d3c8-8285-4fc8-b690-31340d51509d node DatanodeRegistration(127.0.0.1:39241, datanodeUuid=5b533e50-8d8a-4498-97b8-acb11831b9a2, infoPort=33371, infoSecurePort=0, ipcPort=44575, storageInfo=lv=-57;cid=testClusterID;nsid=565152765;c=1734378229927), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T19:43:53,980 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x181c90f5534bf3ee with lease ID 0xf91f7bf8a51e7190: Processing first storage report for DS-c709ee7e-9c4a-4037-a68f-47236c1206e4 from datanode DatanodeRegistration(127.0.0.1:43893, datanodeUuid=2888fb3c-2097-4f16-bd71-ae02aa7cea00, infoPort=45671, infoSecurePort=0, ipcPort=36193, storageInfo=lv=-57;cid=testClusterID;nsid=565152765;c=1734378229927)
2024-12-16T19:43:53,980 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x181c90f5534bf3ee with lease ID 0xf91f7bf8a51e7190: from storage DS-c709ee7e-9c4a-4037-a68f-47236c1206e4 node DatanodeRegistration(127.0.0.1:43893, datanodeUuid=2888fb3c-2097-4f16-bd71-ae02aa7cea00, infoPort=45671, infoSecurePort=0, ipcPort=36193, storageInfo=lv=-57;cid=testClusterID;nsid=565152765;c=1734378229927), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-16T19:43:54,116 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data5/current/BP-1238680177-172.17.0.2-1734378229927/current, will proceed with Du for space computation calculation,
2024-12-16T19:43:54,118 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data6/current/BP-1238680177-172.17.0.2-1734378229927/current, will proceed with Du for space computation calculation,
2024-12-16T19:43:54,184 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-12-16T19:43:54,204 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb224a37f61903d58 with lease ID 0xf91f7bf8a51e7192: Processing first storage report for DS-496d9931-f607-4803-8fbb-64e05aba6f02 from datanode DatanodeRegistration(127.0.0.1:40033, datanodeUuid=65a82667-85a4-4311-882b-d13d249bdfc6, infoPort=34263, infoSecurePort=0, ipcPort=40185, storageInfo=lv=-57;cid=testClusterID;nsid=565152765;c=1734378229927)
2024-12-16T19:43:54,204 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb224a37f61903d58 with lease ID 0xf91f7bf8a51e7192: from storage DS-496d9931-f607-4803-8fbb-64e05aba6f02 node DatanodeRegistration(127.0.0.1:40033, datanodeUuid=65a82667-85a4-4311-882b-d13d249bdfc6, infoPort=34263, infoSecurePort=0, ipcPort=40185, storageInfo=lv=-57;cid=testClusterID;nsid=565152765;c=1734378229927), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0
2024-12-16T19:43:54,209 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb224a37f61903d58 with lease ID 0xf91f7bf8a51e7192: Processing first storage report for DS-5c5f74fe-057b-43a1-a97b-fc5c43e25f1c from datanode DatanodeRegistration(127.0.0.1:40033, datanodeUuid=65a82667-85a4-4311-882b-d13d249bdfc6, infoPort=34263, infoSecurePort=0, ipcPort=40185, storageInfo=lv=-57;cid=testClusterID;nsid=565152765;c=1734378229927)
2024-12-16T19:43:54,209 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb224a37f61903d58 with lease ID 0xf91f7bf8a51e7192: from storage DS-5c5f74fe-057b-43a1-a97b-fc5c43e25f1c node DatanodeRegistration(127.0.0.1:40033, datanodeUuid=65a82667-85a4-4311-882b-d13d249bdfc6, infoPort=34263, infoSecurePort=0, ipcPort=40185, storageInfo=lv=-57;cid=testClusterID;nsid=565152765;c=1734378229927), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-12-16T19:43:54,241 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60
2024-12-16T19:43:54,387 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/zookeeper_0, clientPort=62515, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-16T19:43:54,404 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=62515
2024-12-16T19:43:54,421 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T19:43:54,428 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T19:43:54,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741825_1001 (size=7)
2024-12-16T19:43:54,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741825_1001 (size=7)
2024-12-16T19:43:54,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741825_1001 (size=7)
2024-12-16T19:43:54,891 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c with version=8
2024-12-16T19:43:54,892 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/hbase-staging
2024-12-16T19:43:55,193 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-16T19:43:55,638 INFO [Time-limited test {}] client.ConnectionUtils(129): master/7a6e4bf70377:0 server-side Connection retries=45
2024-12-16T19:43:55,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T19:43:55,667 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-16T19:43:55,668 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-16T19:43:55,668 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T19:43:55,668 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-16T19:43:55,955 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-16T19:43:56,051 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-12-16T19:43:56,066 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-12-16T19:43:56,071 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-16T19:43:56,111 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 51216 (auto-detected)
2024-12-16T19:43:56,112 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-12-16T19:43:56,139 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45883
2024-12-16T19:43:56,152 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T19:43:56,156 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T19:43:56,176 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:45883 connecting to ZooKeeper ensemble=127.0.0.1:62515
2024-12-16T19:43:56,312 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:458830x0, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-16T19:43:56,319 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45883-0x100e481a0950000 connected
2024-12-16T19:43:56,442 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-16T19:43:56,447 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T19:43:56,474 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-16T19:43:56,479 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45883
2024-12-16T19:43:56,479 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45883
2024-12-16T19:43:56,480 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45883
2024-12-16T19:43:56,482 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45883
2024-12-16T19:43:56,482 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45883
2024-12-16T19:43:56,494 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c, hbase.cluster.distributed=false
2024-12-16T19:43:56,606 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/7a6e4bf70377:0 server-side Connection retries=45
2024-12-16T19:43:56,607 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T19:43:56,607 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-16T19:43:56,608 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-16T19:43:56,608 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T19:43:56,608 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-16T19:43:56,611 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-16T19:43:56,614 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-16T19:43:56,627 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:45217
2024-12-16T19:43:56,630 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-16T19:43:56,647 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled
2024-12-16T19:43:56,649 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T19:43:56,656 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T19:43:56,672 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:45217 connecting to ZooKeeper ensemble=127.0.0.1:62515
2024-12-16T19:43:56,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:452170x0, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-16T19:43:56,717 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:452170x0, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-16T19:43:56,718 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45217-0x100e481a0950001 connected
2024-12-16T19:43:56,736 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T19:43:56,750 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-16T19:43:56,774 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45217
2024-12-16T19:43:56,782 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45217
2024-12-16T19:43:56,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45217
2024-12-16T19:43:56,810 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45217
2024-12-16T19:43:56,814 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45217
2024-12-16T19:43:56,852 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/7a6e4bf70377:0 server-side Connection retries=45
2024-12-16T19:43:56,852 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T19:43:56,852 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-16T19:43:56,853 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-16T19:43:56,853 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T19:43:56,853 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-16T19:43:56,853 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-16T19:43:56,854 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-16T19:43:56,855 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42741
2024-12-16T19:43:56,856 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-16T19:43:56,862 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled
2024-12-16T19:43:56,863 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T19:43:56,868 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T19:43:56,873 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:42741 connecting to ZooKeeper ensemble=127.0.0.1:62515
2024-12-16T19:43:56,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:427410x0, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-16T19:43:56,911 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:427410x0, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-16T19:43:56,914 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42741-0x100e481a0950002 connected
2024-12-16T19:43:56,923 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T19:43:56,927 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-16T19:43:56,942 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42741
2024-12-16T19:43:56,944 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42741
2024-12-16T19:43:56,966 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42741
2024-12-16T19:43:56,969 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42741
2024-12-16T19:43:56,970 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42741
2024-12-16T19:43:57,027 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/7a6e4bf70377:0 server-side Connection retries=45
2024-12-16T19:43:57,027 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T19:43:57,028 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-16T19:43:57,028 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-16T19:43:57,028 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-16T19:43:57,028 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-16T19:43:57,029 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-12-16T19:43:57,030 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-12-16T19:43:57,038 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42861
2024-12-16T19:43:57,039 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-12-16T19:43:57,066 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled
2024-12-16T19:43:57,068 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T19:43:57,072 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-16T19:43:57,077 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:42861 connecting to ZooKeeper ensemble=127.0.0.1:62515
2024-12-16T19:43:57,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:428610x0, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-12-16T19:43:57,091 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:428610x0, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-12-16T19:43:57,100 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:428610x0, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-12-16T19:43:57,103 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:428610x0, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-12-16T19:43:57,113 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42861-0x100e481a0950003 connected
2024-12-16T19:43:57,114 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42861
2024-12-16T19:43:57,115 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42861
2024-12-16T19:43:57,142 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42861
2024-12-16T19:43:57,154 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42861
2024-12-16T19:43:57,158 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42861
2024-12-16T19:43:57,171 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/7a6e4bf70377,45883,1734378235170
2024-12-16T19:43:57,203 DEBUG [M:0;7a6e4bf70377:45883 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7a6e4bf70377:45883
2024-12-16T19:43:57,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T19:43:57,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T19:43:57,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T19:43:57,210 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T19:43:57,222 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7a6e4bf70377,45883,1734378235170 2024-12-16T19:43:57,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-16T19:43:57,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:57,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-16T19:43:57,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:57,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-16T19:43:57,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:57,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-16T19:43:57,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:57,301 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on existing 
znode=/hbase/master 2024-12-16T19:43:57,303 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7a6e4bf70377,45883,1734378235170 from backup master directory 2024-12-16T19:43:57,307 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-16T19:43:57,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T19:43:57,319 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T19:43:57,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7a6e4bf70377,45883,1734378235170 2024-12-16T19:43:57,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T19:43:57,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-16T19:43:57,324 WARN [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-16T19:43:57,324 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7a6e4bf70377,45883,1734378235170 2024-12-16T19:43:57,328 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-16T19:43:57,333 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-16T19:43:57,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741826_1002 (size=42) 2024-12-16T19:43:57,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741826_1002 (size=42) 2024-12-16T19:43:57,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741826_1002 (size=42) 2024-12-16T19:43:57,581 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/hbase.id with ID: a5dc5780-2f08-42af-8266-8013110bfe67 2024-12-16T19:43:57,718 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-16T19:43:57,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:57,795 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:57,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:57,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:57,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741827_1003 (size=196) 2024-12-16T19:43:57,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741827_1003 (size=196) 2024-12-16T19:43:57,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741827_1003 (size=196) 2024-12-16T19:43:57,929 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T19:43:57,931 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-16T19:43:57,969 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T19:43:57,977 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-16T19:43:58,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741828_1004 (size=1189) 2024-12-16T19:43:58,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741828_1004 (size=1189) 2024-12-16T19:43:58,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741828_1004 (size=1189) 2024-12-16T19:43:58,109 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData/data/master/store 2024-12-16T19:43:58,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741829_1005 (size=34) 2024-12-16T19:43:58,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741829_1005 (size=34) 2024-12-16T19:43:58,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741829_1005 (size=34) 2024-12-16T19:43:58,233 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. 
Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-16T19:43:58,234 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:43:58,235 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-16T19:43:58,236 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T19:43:58,236 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T19:43:58,236 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-16T19:43:58,236 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T19:43:58,236 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T19:43:58,236 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-16T19:43:58,246 WARN [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData/data/master/store/.initializing 2024-12-16T19:43:58,247 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData/WALs/7a6e4bf70377,45883,1734378235170 2024-12-16T19:43:58,267 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-16T19:43:58,303 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C45883%2C1734378235170, suffix=, logDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData/WALs/7a6e4bf70377,45883,1734378235170, archiveDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData/oldWALs, maxLogs=10 2024-12-16T19:43:58,342 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData/WALs/7a6e4bf70377,45883,1734378235170/7a6e4bf70377%2C45883%2C1734378235170.1734378238313, exclude list is [], retry=0 2024-12-16T19:43:58,386 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39241,DS-4fb865bf-4391-49ae-b31e-3b62fb3795cf,DISK] 2024-12-16T19:43:58,387 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping 
handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40033,DS-496d9931-f607-4803-8fbb-64e05aba6f02,DISK] 2024-12-16T19:43:58,388 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43893,DS-a5be4d6e-06b0-4d3e-af01-bc15c82cbee1,DISK] 2024-12-16T19:43:58,391 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-16T19:43:58,487 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData/WALs/7a6e4bf70377,45883,1734378235170/7a6e4bf70377%2C45883%2C1734378235170.1734378238313 2024-12-16T19:43:58,491 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34263:34263),(127.0.0.1/127.0.0.1:45671:45671),(127.0.0.1/127.0.0.1:33371:33371)] 2024-12-16T19:43:58,492 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-16T19:43:58,493 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:43:58,500 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-16T19:43:58,502 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-16T19:43:58,569 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-16T19:43:58,622 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-16T19:43:58,632 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:43:58,636 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore 
type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T19:43:58,637 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-16T19:43:58,649 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-16T19:43:58,650 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:43:58,652 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:43:58,652 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-16T19:43:58,656 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-16T19:43:58,657 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:43:58,659 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:43:58,660 
INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-16T19:43:58,665 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-16T19:43:58,666 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:43:58,678 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:43:58,684 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-16T19:43:58,687 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-16T19:43:58,708 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-16T19:43:58,725 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-16T19:43:58,738 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:43:58,741 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71481740, jitterRate=0.06516093015670776}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-16T19:43:58,746 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-16T19:43:58,750 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-16T19:43:58,810 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b937fc6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:43:58,856 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-16T19:43:58,877 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-16T19:43:58,877 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-16T19:43:58,880 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-16T19:43:58,884 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 3 msec 2024-12-16T19:43:58,890 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 5 msec 2024-12-16T19:43:58,891 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-16T19:43:58,960 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-16T19:43:58,989 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-16T19:43:59,050 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-16T19:43:59,061 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-16T19:43:59,073 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-16T19:43:59,085 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-16T19:43:59,090 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-16T19:43:59,108 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-16T19:43:59,132 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-16T19:43:59,136 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-16T19:43:59,158 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-16T19:43:59,178 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-16T19:43:59,189 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-16T19:43:59,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-16T19:43:59,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:59,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-16T19:43:59,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-16T19:43:59,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-16T19:43:59,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-16T19:43:59,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:59,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:59,204 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=7a6e4bf70377,45883,1734378235170, sessionid=0x100e481a0950000, setting cluster-up flag (Was=false) 2024-12-16T19:43:59,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:59,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:59,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:59,236 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:59,281 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-16T19:43:59,288 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a6e4bf70377,45883,1734378235170 2024-12-16T19:43:59,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:59,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:59,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:59,326 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:43:59,356 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-16T19:43:59,360 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7a6e4bf70377,45883,1734378235170 2024-12-16T19:43:59,446 DEBUG [RS:1;7a6e4bf70377:42741 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;7a6e4bf70377:42741 2024-12-16T19:43:59,451 DEBUG [RS:2;7a6e4bf70377:42861 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;7a6e4bf70377:42861 2024-12-16T19:43:59,455 DEBUG [RS:0;7a6e4bf70377:45217 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7a6e4bf70377:45217 2024-12-16T19:43:59,459 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(1008): ClusterId : a5dc5780-2f08-42af-8266-8013110bfe67 2024-12-16T19:43:59,460 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(1008): ClusterId : a5dc5780-2f08-42af-8266-8013110bfe67 2024-12-16T19:43:59,461 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(1008): ClusterId : a5dc5780-2f08-42af-8266-8013110bfe67 2024-12-16T19:43:59,463 DEBUG [RS:1;7a6e4bf70377:42741 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-16T19:43:59,464 DEBUG [RS:2;7a6e4bf70377:42861 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-16T19:43:59,464 DEBUG [RS:0;7a6e4bf70377:45217 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-16T19:43:59,505 DEBUG [RS:0;7a6e4bf70377:45217 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-16T19:43:59,505 DEBUG [RS:0;7a6e4bf70377:45217 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-16T19:43:59,506 DEBUG [RS:2;7a6e4bf70377:42861 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-16T19:43:59,506 DEBUG [RS:2;7a6e4bf70377:42861 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-16T19:43:59,512 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(3390): Registered master coprocessor service: service=AccessControlService 2024-12-16T19:43:59,519 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:43:59,519 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
2024-12-16T19:43:59,526 DEBUG [RS:1;7a6e4bf70377:42741 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-16T19:43:59,526 DEBUG [RS:1;7a6e4bf70377:42741 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-16T19:43:59,544 DEBUG [RS:0;7a6e4bf70377:45217 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-16T19:43:59,544 DEBUG [RS:0;7a6e4bf70377:45217 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a90e0c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:43:59,546 DEBUG [RS:2;7a6e4bf70377:42861 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-16T19:43:59,547 DEBUG [RS:2;7a6e4bf70377:42861 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73a836c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:43:59,549 DEBUG [RS:1;7a6e4bf70377:42741 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-16T19:43:59,550 DEBUG [RS:1;7a6e4bf70377:42741 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a4d2e51, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:43:59,550 DEBUG [RS:0;7a6e4bf70377:45217 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d881b95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a6e4bf70377/172.17.0.2:0 2024-12-16T19:43:59,554 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-16T19:43:59,555 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-16T19:43:59,555 DEBUG [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-16T19:43:59,555 INFO [RS:0;7a6e4bf70377:45217 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:43:59,555 DEBUG [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-16T19:43:59,560 DEBUG [RS:1;7a6e4bf70377:42741 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45386bbc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a6e4bf70377/172.17.0.2:0 2024-12-16T19:43:59,560 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-16T19:43:59,561 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-16T19:43:59,561 DEBUG [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-16T19:43:59,561 INFO [RS:1;7a6e4bf70377:42741 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:43:59,561 DEBUG [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-16T19:43:59,563 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(3073): reportForDuty to master=7a6e4bf70377,45883,1734378235170 with isa=7a6e4bf70377/172.17.0.2:45217, startcode=1734378236605 2024-12-16T19:43:59,568 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(3073): reportForDuty to master=7a6e4bf70377,45883,1734378235170 with isa=7a6e4bf70377/172.17.0.2:42741, startcode=1734378236851 2024-12-16T19:43:59,570 DEBUG [RS:2;7a6e4bf70377:42861 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f9bd3b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a6e4bf70377/172.17.0.2:0 2024-12-16T19:43:59,570 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-16T19:43:59,571 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-16T19:43:59,571 DEBUG [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-16T19:43:59,571 INFO [RS:2;7a6e4bf70377:42861 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:43:59,571 DEBUG [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-16T19:43:59,578 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(3073): reportForDuty to master=7a6e4bf70377,45883,1734378235170 with isa=7a6e4bf70377/172.17.0.2:42861, startcode=1734378237024 2024-12-16T19:43:59,584 DEBUG [RS:2;7a6e4bf70377:42861 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-16T19:43:59,584 DEBUG [RS:1;7a6e4bf70377:42741 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-16T19:43:59,585 DEBUG [RS:0;7a6e4bf70377:45217 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-16T19:43:59,653 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-16T19:43:59,664 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38295, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-16T19:43:59,667 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44275, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-16T19:43:59,670 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36057, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-16T19:43:59,671 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-16T19:43:59,679 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-16T19:43:59,684 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45883 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T19:43:59,699 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45883 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T19:43:59,697 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7a6e4bf70377,45883,1734378235170 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-16T19:43:59,700 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45883 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-16T19:43:59,722 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5 2024-12-16T19:43:59,722 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5 2024-12-16T19:43:59,722 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5 2024-12-16T19:43:59,722 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=5, maxPoolSize=5 2024-12-16T19:43:59,723 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7a6e4bf70377:0, corePoolSize=10, maxPoolSize=10 2024-12-16T19:43:59,723 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:43:59,723 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=2, maxPoolSize=2 2024-12-16T19:43:59,723 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:43:59,732 DEBUG [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-16T19:43:59,732 DEBUG [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-16T19:43:59,732 WARN [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-16T19:43:59,732 WARN [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-16T19:43:59,732 DEBUG [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-16T19:43:59,732 WARN [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-12-16T19:43:59,743 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734378269743 2024-12-16T19:43:59,746 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-16T19:43:59,748 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-16T19:43:59,753 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-16T19:43:59,754 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-16T19:43:59,755 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-16T19:43:59,763 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-16T19:43:59,766 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-16T19:43:59,767 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-16T19:43:59,778 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-16T19:43:59,784 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:43:59,788 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-16T19:43:59,784 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-16T19:43:59,790 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-16T19:43:59,791 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-16T19:43:59,802 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-16T19:43:59,803 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-16T19:43:59,808 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734378239804,5,FailOnTimeoutGroup] 2024-12-16T19:43:59,815 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734378239809,5,FailOnTimeoutGroup] 2024-12-16T19:43:59,815 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-16T19:43:59,815 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-16T19:43:59,817 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-16T19:43:59,818 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-16T19:43:59,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741831_1007 (size=1039) 2024-12-16T19:43:59,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741831_1007 (size=1039) 2024-12-16T19:43:59,834 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(3073): reportForDuty to master=7a6e4bf70377,45883,1734378235170 with isa=7a6e4bf70377/172.17.0.2:42741, startcode=1734378236851 2024-12-16T19:43:59,835 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(3073): reportForDuty to master=7a6e4bf70377,45883,1734378235170 with isa=7a6e4bf70377/172.17.0.2:45217, startcode=1734378236605 2024-12-16T19:43:59,836 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45883 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 7a6e4bf70377,42741,1734378236851 2024-12-16T19:43:59,837 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(3073): reportForDuty to master=7a6e4bf70377,45883,1734378235170 with isa=7a6e4bf70377/172.17.0.2:42861, startcode=1734378237024 2024-12-16T19:43:59,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741831_1007 (size=1039) 2024-12-16T19:43:59,840 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45883 {}] master.ServerManager(486): Registering regionserver=7a6e4bf70377,42741,1734378236851 2024-12-16T19:43:59,841 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-16T19:43:59,842 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:43:59,860 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45883 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 7a6e4bf70377,45217,1734378236605 2024-12-16T19:43:59,861 INFO 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45883 {}] master.ServerManager(486): Registering regionserver=7a6e4bf70377,45217,1734378236605 2024-12-16T19:43:59,863 DEBUG [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:43:59,863 DEBUG [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:38709 2024-12-16T19:43:59,863 DEBUG [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-16T19:43:59,878 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45883 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 7a6e4bf70377,42861,1734378237024 2024-12-16T19:43:59,879 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45883 {}] master.ServerManager(486): Registering regionserver=7a6e4bf70377,42861,1734378237024 2024-12-16T19:43:59,880 DEBUG [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:43:59,880 DEBUG [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:38709 2024-12-16T19:43:59,880 DEBUG [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-16T19:43:59,887 DEBUG [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:43:59,887 DEBUG [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:38709 2024-12-16T19:43:59,888 DEBUG [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-16T19:43:59,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741832_1008 (size=32) 2024-12-16T19:43:59,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741832_1008 (size=32) 2024-12-16T19:43:59,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741832_1008 (size=32) 2024-12-16T19:43:59,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:43:59,926 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-16T19:43:59,927 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-16T19:43:59,933 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); 
files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-16T19:43:59,933 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:43:59,936 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T19:43:59,936 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-16T19:43:59,941 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-16T19:43:59,941 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:43:59,943 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T19:43:59,943 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-16T19:43:59,948 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window 
factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-16T19:43:59,948 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:43:59,952 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T19:43:59,955 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740 2024-12-16T19:43:59,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740 2024-12-16T19:43:59,963 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-16T19:43:59,963 DEBUG [RS:1;7a6e4bf70377:42741 {}] zookeeper.ZKUtil(111): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7a6e4bf70377,42741,1734378236851 2024-12-16T19:43:59,964 WARN [RS:1;7a6e4bf70377:42741 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-16T19:43:59,964 INFO [RS:1;7a6e4bf70377:42741 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-16T19:43:59,964 DEBUG [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/WALs/7a6e4bf70377,42741,1734378236851 2024-12-16T19:43:59,966 DEBUG [RS:0;7a6e4bf70377:45217 {}] zookeeper.ZKUtil(111): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7a6e4bf70377,45217,1734378236605 2024-12-16T19:43:59,966 WARN [RS:0;7a6e4bf70377:45217 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-16T19:43:59,966 INFO [RS:0;7a6e4bf70377:45217 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-16T19:43:59,966 DEBUG [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/WALs/7a6e4bf70377,45217,1734378236605 2024-12-16T19:43:59,966 DEBUG [RS:2;7a6e4bf70377:42861 {}] zookeeper.ZKUtil(111): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7a6e4bf70377,42861,1734378237024 2024-12-16T19:43:59,966 WARN [RS:2;7a6e4bf70377:42861 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-16T19:43:59,967 INFO [RS:2;7a6e4bf70377:42861 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-16T19:43:59,968 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7a6e4bf70377,45217,1734378236605] 2024-12-16T19:43:59,968 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7a6e4bf70377,42861,1734378237024] 2024-12-16T19:43:59,968 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7a6e4bf70377,42741,1734378236851] 2024-12-16T19:43:59,967 DEBUG [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/WALs/7a6e4bf70377,42861,1734378237024 2024-12-16T19:43:59,986 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-16T19:43:59,996 DEBUG [RS:1;7a6e4bf70377:42741 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-16T19:44:00,022 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:44:00,024 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70899492, jitterRate=0.056484758853912354}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-16T19:44:00,026 DEBUG [RS:2;7a6e4bf70377:42861 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-16T19:44:00,027 DEBUG [RS:0;7a6e4bf70377:45217 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-16T19:44:00,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-16T19:44:00,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-16T19:44:00,028 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-16T19:44:00,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-16T19:44:00,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-16T19:44:00,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-16T19:44:00,035 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-16T19:44:00,038 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-16T19:44:00,040 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-16T19:44:00,042 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-16T19:44:00,042 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-16T19:44:00,052 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, 
state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-16T19:44:00,053 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-16T19:44:00,063 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-16T19:44:00,072 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-16T19:44:00,082 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-16T19:44:00,089 INFO [RS:0;7a6e4bf70377:45217 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-16T19:44:00,092 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-16T19:44:00,090 INFO [RS:0;7a6e4bf70377:45217 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,098 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-16T19:44:00,102 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-16T19:44:00,106 INFO [RS:1;7a6e4bf70377:42741 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-16T19:44:00,106 INFO [RS:1;7a6e4bf70377:42741 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,107 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-16T19:44:00,113 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-16T19:44:00,114 INFO [RS:2;7a6e4bf70377:42861 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-16T19:44:00,114 INFO [RS:2;7a6e4bf70377:42861 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,117 INFO [RS:1;7a6e4bf70377:42741 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,118 INFO [RS:0;7a6e4bf70377:45217 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-16T19:44:00,118 DEBUG [RS:1;7a6e4bf70377:42741 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,118 DEBUG [RS:0;7a6e4bf70377:45217 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,118 DEBUG [RS:1;7a6e4bf70377:42741 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,118 DEBUG [RS:1;7a6e4bf70377:42741 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,118 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-16T19:44:00,118 DEBUG [RS:1;7a6e4bf70377:42741 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,118 DEBUG [RS:1;7a6e4bf70377:42741 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,118 DEBUG [RS:1;7a6e4bf70377:42741 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7a6e4bf70377:0, corePoolSize=2, maxPoolSize=2 2024-12-16T19:44:00,119 DEBUG [RS:1;7a6e4bf70377:42741 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,119 DEBUG [RS:1;7a6e4bf70377:42741 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,119 DEBUG [RS:1;7a6e4bf70377:42741 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,119 DEBUG [RS:0;7a6e4bf70377:45217 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,119 DEBUG [RS:0;7a6e4bf70377:45217 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,119 DEBUG [RS:0;7a6e4bf70377:45217 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,120 DEBUG [RS:0;7a6e4bf70377:45217 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,120 DEBUG [RS:0;7a6e4bf70377:45217 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7a6e4bf70377:0, corePoolSize=2, maxPoolSize=2 2024-12-16T19:44:00,120 DEBUG [RS:0;7a6e4bf70377:45217 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,120 DEBUG [RS:1;7a6e4bf70377:42741 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,120 DEBUG 
[RS:0;7a6e4bf70377:45217 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,120 DEBUG [RS:0;7a6e4bf70377:45217 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,120 INFO [RS:2;7a6e4bf70377:42861 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,120 DEBUG [RS:0;7a6e4bf70377:45217 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,120 DEBUG [RS:2;7a6e4bf70377:42861 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,121 DEBUG [RS:1;7a6e4bf70377:42741 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,121 DEBUG [RS:2;7a6e4bf70377:42861 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,121 DEBUG [RS:0;7a6e4bf70377:45217 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,121 DEBUG [RS:0;7a6e4bf70377:45217 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3 2024-12-16T19:44:00,121 DEBUG [RS:0;7a6e4bf70377:45217 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3 2024-12-16T19:44:00,122 DEBUG [RS:2;7a6e4bf70377:42861 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,123 DEBUG [RS:2;7a6e4bf70377:42861 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,123 DEBUG [RS:2;7a6e4bf70377:42861 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,123 DEBUG [RS:1;7a6e4bf70377:42741 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3 2024-12-16T19:44:00,123 DEBUG [RS:2;7a6e4bf70377:42861 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7a6e4bf70377:0, corePoolSize=2, maxPoolSize=2 2024-12-16T19:44:00,124 DEBUG [RS:1;7a6e4bf70377:42741 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3 2024-12-16T19:44:00,124 DEBUG [RS:2;7a6e4bf70377:42861 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,124 DEBUG [RS:2;7a6e4bf70377:42861 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7a6e4bf70377:0, corePoolSize=1, 
maxPoolSize=1 2024-12-16T19:44:00,124 DEBUG [RS:2;7a6e4bf70377:42861 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,124 DEBUG [RS:2;7a6e4bf70377:42861 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,124 DEBUG [RS:2;7a6e4bf70377:42861 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7a6e4bf70377:0, corePoolSize=1, maxPoolSize=1 2024-12-16T19:44:00,124 DEBUG [RS:2;7a6e4bf70377:42861 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3 2024-12-16T19:44:00,125 DEBUG [RS:2;7a6e4bf70377:42861 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7a6e4bf70377:0, corePoolSize=3, maxPoolSize=3 2024-12-16T19:44:00,142 INFO [RS:0;7a6e4bf70377:45217 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,142 INFO [RS:0;7a6e4bf70377:45217 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,143 INFO [RS:0;7a6e4bf70377:45217 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,143 INFO [RS:0;7a6e4bf70377:45217 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,143 INFO [RS:0;7a6e4bf70377:45217 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,45217,1734378236605-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-16T19:44:00,158 INFO [RS:2;7a6e4bf70377:42861 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,158 INFO [RS:2;7a6e4bf70377:42861 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,158 INFO [RS:2;7a6e4bf70377:42861 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,158 INFO [RS:2;7a6e4bf70377:42861 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,158 INFO [RS:1;7a6e4bf70377:42741 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,158 INFO [RS:2;7a6e4bf70377:42861 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,42861,1734378237024-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-16T19:44:00,159 INFO [RS:1;7a6e4bf70377:42741 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,159 INFO [RS:1;7a6e4bf70377:42741 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,159 INFO [RS:1;7a6e4bf70377:42741 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
2024-12-16T19:44:00,159 INFO [RS:1;7a6e4bf70377:42741 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,42741,1734378236851-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-16T19:44:00,185 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-16T19:44:00,193 INFO [RS:0;7a6e4bf70377:45217 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,45217,1734378236605-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,197 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-16T19:44:00,198 INFO [RS:2;7a6e4bf70377:42861 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,42861,1734378237024-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,205 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-16T19:44:00,205 INFO [RS:1;7a6e4bf70377:42741 {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,42741,1734378236851-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:00,223 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.Replication(204): 7a6e4bf70377,42861,1734378237024 started 2024-12-16T19:44:00,223 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(1767): Serving as 7a6e4bf70377,42861,1734378237024, RpcServer on 7a6e4bf70377/172.17.0.2:42861, sessionid=0x100e481a0950003 2024-12-16T19:44:00,224 DEBUG [RS:2;7a6e4bf70377:42861 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-16T19:44:00,224 DEBUG [RS:2;7a6e4bf70377:42861 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7a6e4bf70377,42861,1734378237024 2024-12-16T19:44:00,224 DEBUG [RS:2;7a6e4bf70377:42861 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,42861,1734378237024' 2024-12-16T19:44:00,224 DEBUG [RS:2;7a6e4bf70377:42861 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-16T19:44:00,225 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.Replication(204): 7a6e4bf70377,45217,1734378236605 started 2024-12-16T19:44:00,225 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(1767): Serving as 7a6e4bf70377,45217,1734378236605, RpcServer on 7a6e4bf70377/172.17.0.2:45217, sessionid=0x100e481a0950001 2024-12-16T19:44:00,225 DEBUG [RS:0;7a6e4bf70377:45217 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-16T19:44:00,225 DEBUG [RS:0;7a6e4bf70377:45217 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7a6e4bf70377,45217,1734378236605 2024-12-16T19:44:00,225 DEBUG [RS:2;7a6e4bf70377:42861 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-16T19:44:00,225 DEBUG [RS:0;7a6e4bf70377:45217 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,45217,1734378236605' 2024-12-16T19:44:00,225 DEBUG [RS:0;7a6e4bf70377:45217 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-16T19:44:00,226 DEBUG [RS:2;7a6e4bf70377:42861 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-16T19:44:00,226 DEBUG [RS:2;7a6e4bf70377:42861 {}] 
procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-16T19:44:00,226 DEBUG [RS:2;7a6e4bf70377:42861 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7a6e4bf70377,42861,1734378237024 2024-12-16T19:44:00,226 DEBUG [RS:2;7a6e4bf70377:42861 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,42861,1734378237024' 2024-12-16T19:44:00,226 DEBUG [RS:2;7a6e4bf70377:42861 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-16T19:44:00,227 DEBUG [RS:2;7a6e4bf70377:42861 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-16T19:44:00,227 DEBUG [RS:0;7a6e4bf70377:45217 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-16T19:44:00,228 DEBUG [RS:2;7a6e4bf70377:42861 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-16T19:44:00,228 INFO [RS:2;7a6e4bf70377:42861 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-16T19:44:00,228 INFO [RS:2;7a6e4bf70377:42861 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-16T19:44:00,229 DEBUG [RS:0;7a6e4bf70377:45217 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-16T19:44:00,229 DEBUG [RS:0;7a6e4bf70377:45217 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-16T19:44:00,229 DEBUG [RS:0;7a6e4bf70377:45217 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7a6e4bf70377,45217,1734378236605 2024-12-16T19:44:00,229 DEBUG [RS:0;7a6e4bf70377:45217 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,45217,1734378236605' 2024-12-16T19:44:00,229 DEBUG [RS:0;7a6e4bf70377:45217 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-16T19:44:00,230 DEBUG [RS:0;7a6e4bf70377:45217 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-16T19:44:00,231 DEBUG [RS:0;7a6e4bf70377:45217 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-16T19:44:00,231 INFO [RS:0;7a6e4bf70377:45217 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-16T19:44:00,231 INFO [RS:0;7a6e4bf70377:45217 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-16T19:44:00,236 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.Replication(204): 7a6e4bf70377,42741,1734378236851 started 2024-12-16T19:44:00,236 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(1767): Serving as 7a6e4bf70377,42741,1734378236851, RpcServer on 7a6e4bf70377/172.17.0.2:42741, sessionid=0x100e481a0950002 2024-12-16T19:44:00,237 DEBUG [RS:1;7a6e4bf70377:42741 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-16T19:44:00,237 DEBUG [RS:1;7a6e4bf70377:42741 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:00,237 DEBUG [RS:1;7a6e4bf70377:42741 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,42741,1734378236851' 2024-12-16T19:44:00,237 DEBUG [RS:1;7a6e4bf70377:42741 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-16T19:44:00,239 DEBUG [RS:1;7a6e4bf70377:42741 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-16T19:44:00,244 DEBUG [RS:1;7a6e4bf70377:42741 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-16T19:44:00,244 DEBUG [RS:1;7a6e4bf70377:42741 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-16T19:44:00,244 DEBUG [RS:1;7a6e4bf70377:42741 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:00,245 DEBUG [RS:1;7a6e4bf70377:42741 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7a6e4bf70377,42741,1734378236851' 2024-12-16T19:44:00,245 DEBUG [RS:1;7a6e4bf70377:42741 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-16T19:44:00,247 DEBUG [RS:1;7a6e4bf70377:42741 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-16T19:44:00,248 DEBUG [RS:1;7a6e4bf70377:42741 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-16T19:44:00,248 INFO [RS:1;7a6e4bf70377:42741 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-16T19:44:00,248 INFO [RS:1;7a6e4bf70377:42741 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-16T19:44:00,252 WARN [7a6e4bf70377:45883 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-16T19:44:00,334 INFO [RS:2;7a6e4bf70377:42861 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-16T19:44:00,334 INFO [RS:0;7a6e4bf70377:45217 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-16T19:44:00,338 INFO [RS:0;7a6e4bf70377:45217 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C45217%2C1734378236605, suffix=, logDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/WALs/7a6e4bf70377,45217,1734378236605, archiveDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/oldWALs, maxLogs=32 2024-12-16T19:44:00,340 INFO [RS:2;7a6e4bf70377:42861 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C42861%2C1734378237024, suffix=, logDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/WALs/7a6e4bf70377,42861,1734378237024, archiveDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/oldWALs, maxLogs=32 2024-12-16T19:44:00,350 INFO [RS:1;7a6e4bf70377:42741 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-16T19:44:00,367 DEBUG [RS:0;7a6e4bf70377:45217 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/WALs/7a6e4bf70377,45217,1734378236605/7a6e4bf70377%2C45217%2C1734378236605.1734378240344, exclude list is [], retry=0 2024-12-16T19:44:00,369 INFO [RS:1;7a6e4bf70377:42741 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C42741%2C1734378236851, suffix=, logDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/WALs/7a6e4bf70377,42741,1734378236851, archiveDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/oldWALs, maxLogs=32 2024-12-16T19:44:00,384 DEBUG [RS:2;7a6e4bf70377:42861 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/WALs/7a6e4bf70377,42861,1734378237024/7a6e4bf70377%2C42861%2C1734378237024.1734378240343, exclude list is [], retry=0 2024-12-16T19:44:00,394 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43893,DS-a5be4d6e-06b0-4d3e-af01-bc15c82cbee1,DISK] 2024-12-16T19:44:00,395 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39241,DS-4fb865bf-4391-49ae-b31e-3b62fb3795cf,DISK] 2024-12-16T19:44:00,396 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40033,DS-496d9931-f607-4803-8fbb-64e05aba6f02,DISK] 2024-12-16T19:44:00,461 DEBUG [RS:1;7a6e4bf70377:42741 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/WALs/7a6e4bf70377,42741,1734378236851/7a6e4bf70377%2C42741%2C1734378236851.1734378240371, exclude list is [], retry=0 
2024-12-16T19:44:00,465 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40033,DS-496d9931-f607-4803-8fbb-64e05aba6f02,DISK] 2024-12-16T19:44:00,468 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39241,DS-4fb865bf-4391-49ae-b31e-3b62fb3795cf,DISK] 2024-12-16T19:44:00,472 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43893,DS-a5be4d6e-06b0-4d3e-af01-bc15c82cbee1,DISK] 2024-12-16T19:44:00,477 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40033,DS-496d9931-f607-4803-8fbb-64e05aba6f02,DISK] 2024-12-16T19:44:00,479 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39241,DS-4fb865bf-4391-49ae-b31e-3b62fb3795cf,DISK] 2024-12-16T19:44:00,481 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43893,DS-a5be4d6e-06b0-4d3e-af01-bc15c82cbee1,DISK] 2024-12-16T19:44:00,535 INFO [RS:0;7a6e4bf70377:45217 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/WALs/7a6e4bf70377,45217,1734378236605/7a6e4bf70377%2C45217%2C1734378236605.1734378240344 2024-12-16T19:44:00,551 INFO [RS:2;7a6e4bf70377:42861 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/WALs/7a6e4bf70377,42861,1734378237024/7a6e4bf70377%2C42861%2C1734378237024.1734378240343 2024-12-16T19:44:00,552 DEBUG [RS:2;7a6e4bf70377:42861 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34263:34263),(127.0.0.1/127.0.0.1:33371:33371),(127.0.0.1/127.0.0.1:45671:45671)] 2024-12-16T19:44:00,559 DEBUG [RS:0;7a6e4bf70377:45217 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34263:34263),(127.0.0.1/127.0.0.1:33371:33371),(127.0.0.1/127.0.0.1:45671:45671)] 2024-12-16T19:44:00,571 INFO [RS:1;7a6e4bf70377:42741 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/WALs/7a6e4bf70377,42741,1734378236851/7a6e4bf70377%2C42741%2C1734378236851.1734378240371 2024-12-16T19:44:00,571 DEBUG [RS:1;7a6e4bf70377:42741 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34263:34263),(127.0.0.1/127.0.0.1:33371:33371),(127.0.0.1/127.0.0.1:45671:45671)] 2024-12-16T19:44:00,760 DEBUG [7a6e4bf70377:45883 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-16T19:44:00,764 DEBUG [7a6e4bf70377:45883 {}] balancer.BalancerClusterState(202): Hosts are {7a6e4bf70377=0} racks are {/default-rack=0} 2024-12-16T19:44:00,791 DEBUG 
[7a6e4bf70377:45883 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T19:44:00,791 DEBUG [7a6e4bf70377:45883 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T19:44:00,791 DEBUG [7a6e4bf70377:45883 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T19:44:00,792 INFO [7a6e4bf70377:45883 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T19:44:00,792 INFO [7a6e4bf70377:45883 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T19:44:00,792 INFO [7a6e4bf70377:45883 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T19:44:00,792 DEBUG [7a6e4bf70377:45883 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T19:44:00,798 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:00,805 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a6e4bf70377,42741,1734378236851, state=OPENING 2024-12-16T19:44:00,822 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-16T19:44:00,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:44:00,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:44:00,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:44:00,832 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T19:44:00,833 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T19:44:00,833 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T19:44:00,834 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:44:00,835 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:44:00,836 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T19:44:01,052 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:01,055 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-16T19:44:01,088 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:52384, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-16T19:44:01,133 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-16T19:44:01,133 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-16T19:44:01,134 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-16T19:44:01,143 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7a6e4bf70377%2C42741%2C1734378236851.meta, suffix=.meta, logDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/WALs/7a6e4bf70377,42741,1734378236851, archiveDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/oldWALs, maxLogs=32 2024-12-16T19:44:01,199 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/WALs/7a6e4bf70377,42741,1734378236851/7a6e4bf70377%2C42741%2C1734378236851.meta.1734378241145.meta, exclude list is [], retry=0 2024-12-16T19:44:01,205 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43893,DS-a5be4d6e-06b0-4d3e-af01-bc15c82cbee1,DISK] 2024-12-16T19:44:01,205 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40033,DS-496d9931-f607-4803-8fbb-64e05aba6f02,DISK] 2024-12-16T19:44:01,207 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39241,DS-4fb865bf-4391-49ae-b31e-3b62fb3795cf,DISK] 2024-12-16T19:44:01,260 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/WALs/7a6e4bf70377,42741,1734378236851/7a6e4bf70377%2C42741%2C1734378236851.meta.1734378241145.meta 2024-12-16T19:44:01,263 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34263:34263),(127.0.0.1/127.0.0.1:45671:45671),(127.0.0.1/127.0.0.1:33371:33371)] 2024-12-16T19:44:01,263 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-16T19:44:01,275 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-16T19:44:01,277 INFO 
[RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:44:01,278 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-16T19:44:01,280 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-16T19:44:01,283 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-16T19:44:01,315 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-16T19:44:01,315 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:44:01,316 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-16T19:44:01,316 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-16T19:44:01,340 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-16T19:44:01,352 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-16T19:44:01,353 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:01,360 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T19:44:01,360 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-16T19:44:01,363 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-16T19:44:01,363 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:01,366 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T19:44:01,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-16T19:44:01,370 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-16T19:44:01,370 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:01,379 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-16T19:44:01,381 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740 2024-12-16T19:44:01,390 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740 
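
[Annotation, not part of the captured output] The records above show hbase:meta's three column families (info, rep_barrier, table) being opened with no recovered edits, and the region 1588230740 coming online on one regionserver. A minimal client-side sketch for confirming where hbase:meta landed, assuming an hbase-site.xml for this test cluster is on the classpath (class and variable names are illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up the test cluster's hbase-site.xml
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // 1588230740 is the fixed encoded name of hbase:meta,,1 seen in the log above
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
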
2024-12-16T19:44:01,401 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-16T19:44:01,407 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-16T19:44:01,410 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66507847, jitterRate=-0.008955851197242737}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-16T19:44:01,415 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-16T19:44:01,433 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734378241036 2024-12-16T19:44:01,454 DEBUG [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-16T19:44:01,455 INFO [RS_OPEN_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-16T19:44:01,463 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:01,468 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7a6e4bf70377,42741,1734378236851, state=OPEN 2024-12-16T19:44:01,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-16T19:44:01,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-16T19:44:01,506 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T19:44:01,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-16T19:44:01,506 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T19:44:01,506 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T19:44:01,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-16T19:44:01,506 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-16T19:44:01,517 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-16T19:44:01,518 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=7a6e4bf70377,42741,1734378236851 in 672 msec 2024-12-16T19:44:01,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-16T19:44:01,531 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.4570 sec 2024-12-16T19:44:01,540 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 2.0100 sec 2024-12-16T19:44:01,540 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734378241540, completionTime=-1 2024-12-16T19:44:01,541 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-16T19:44:01,541 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-16T19:44:01,633 DEBUG [hconnection-0x736823cc-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:01,637 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52398, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:01,662 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=3 2024-12-16T19:44:01,662 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734378301662 2024-12-16T19:44:01,662 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734378361662 2024-12-16T19:44:01,662 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 121 msec 2024-12-16T19:44:01,755 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-16T19:44:01,850 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,45883,1734378235170-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:01,855 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,45883,1734378235170-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 
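
[Annotation, not part of the captured output] At this point the master has reported "Finished waiting on RegionServer count=3" and the AssignmentManager has joined the cluster. A sketch of the equivalent client-side health check, hedged as illustrative (connection setup assumes the same classpath configuration as above):

import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterReadyCheck {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      // the master above waited for 3 regionservers before declaring itself initialized
      System.out.println("live regionservers: " + metrics.getLiveServerMetrics().size());
      System.out.println("active master:      " + metrics.getMasterName());
    }
  }
}
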
2024-12-16T19:44:01,856 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,45883,1734378235170-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:01,859 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7a6e4bf70377:45883, period=300000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:01,880 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-16T19:44:01,893 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 2024-12-16T19:44:01,895 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-16T19:44:01,912 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-16T19:44:01,935 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-16T19:44:01,943 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T19:44:01,944 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:01,950 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T19:44:01,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741837_1013 (size=358) 2024-12-16T19:44:01,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741837_1013 (size=358) 2024-12-16T19:44:01,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741837_1013 (size=358) 2024-12-16T19:44:01,995 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3893a032022fb38e1a88fc4547193141, NAME => 'hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:44:02,113 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741838_1014 (size=42) 2024-12-16T19:44:02,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741838_1014 (size=42) 2024-12-16T19:44:02,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741838_1014 (size=42) 2024-12-16T19:44:02,515 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:44:02,515 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 3893a032022fb38e1a88fc4547193141, disabling compactions & flushes 2024-12-16T19:44:02,515 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141. 2024-12-16T19:44:02,515 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141. 2024-12-16T19:44:02,515 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141. after waiting 0 ms 2024-12-16T19:44:02,515 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141. 2024-12-16T19:44:02,515 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141. 2024-12-16T19:44:02,515 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3893a032022fb38e1a88fc4547193141: 2024-12-16T19:44:02,518 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T19:44:02,524 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734378242519"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378242519"}]},"ts":"1734378242519"} 2024-12-16T19:44:02,570 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-16T19:44:02,575 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T19:44:02,579 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378242575"}]},"ts":"1734378242575"} 2024-12-16T19:44:02,587 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-16T19:44:02,614 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {7a6e4bf70377=0} racks are {/default-rack=0} 2024-12-16T19:44:02,616 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T19:44:02,616 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T19:44:02,616 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T19:44:02,616 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T19:44:02,616 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T19:44:02,616 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T19:44:02,616 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T19:44:02,618 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=3893a032022fb38e1a88fc4547193141, ASSIGN}] 2024-12-16T19:44:02,621 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=3893a032022fb38e1a88fc4547193141, ASSIGN 2024-12-16T19:44:02,626 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=3893a032022fb38e1a88fc4547193141, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42741,1734378236851; forceNewPlan=false, retain=false 2024-12-16T19:44:02,778 INFO [7a6e4bf70377:45883 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-16T19:44:02,779 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=3893a032022fb38e1a88fc4547193141, regionState=OPENING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:02,789 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 3893a032022fb38e1a88fc4547193141, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:44:02,950 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:02,958 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141. 
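
[Annotation, not part of the captured output] The master logged the full descriptor it used for 'hbase:namespace' (family 'info' with VERSIONS=10, IN_MEMORY=true, BLOCKSIZE=8KB) and is now driving the CreateTableProcedure through its states. A minimal sketch of how a comparable table descriptor is built and submitted from a client, assuming a hypothetical table name "demo_ns_like"; the master turns the call into the same procedure machinery seen as pid=4 above:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateNamespaceLikeTable {
  public static void main(String[] args) throws Exception {
    // mirrors the 'info' family logged for hbase:namespace: VERSIONS=10, IN_MEMORY=true, BLOCKSIZE=8KB
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo_ns_like")) // hypothetical table name
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(10)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        .build();
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(desc); // executed by the master as a CreateTableProcedure, like pid=4 above
    }
  }
}
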
2024-12-16T19:44:02,958 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 3893a032022fb38e1a88fc4547193141, NAME => 'hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141.', STARTKEY => '', ENDKEY => ''} 2024-12-16T19:44:02,959 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141. service=AccessControlService 2024-12-16T19:44:02,959 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:44:02,959 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 3893a032022fb38e1a88fc4547193141 2024-12-16T19:44:02,959 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:44:02,960 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 3893a032022fb38e1a88fc4547193141 2024-12-16T19:44:02,960 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 3893a032022fb38e1a88fc4547193141 2024-12-16T19:44:02,966 INFO [StoreOpener-3893a032022fb38e1a88fc4547193141-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3893a032022fb38e1a88fc4547193141 2024-12-16T19:44:02,969 INFO [StoreOpener-3893a032022fb38e1a88fc4547193141-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3893a032022fb38e1a88fc4547193141 columnFamilyName info 2024-12-16T19:44:02,969 DEBUG [StoreOpener-3893a032022fb38e1a88fc4547193141-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:02,970 INFO [StoreOpener-3893a032022fb38e1a88fc4547193141-1 {}] regionserver.HStore(327): Store=3893a032022fb38e1a88fc4547193141/info, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:44:02,972 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/namespace/3893a032022fb38e1a88fc4547193141 2024-12-16T19:44:02,973 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/namespace/3893a032022fb38e1a88fc4547193141 2024-12-16T19:44:02,978 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 3893a032022fb38e1a88fc4547193141 2024-12-16T19:44:02,989 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/namespace/3893a032022fb38e1a88fc4547193141/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:44:02,991 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 3893a032022fb38e1a88fc4547193141; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60224529, jitterRate=-0.1025845855474472}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:44:02,992 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 3893a032022fb38e1a88fc4547193141: 2024-12-16T19:44:02,995 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141., pid=6, masterSystemTime=1734378242950 2024-12-16T19:44:03,004 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141. 2024-12-16T19:44:03,004 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141. 
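
[Annotation, not part of the captured output] The hbase:namespace region has now written its initial seqid file and reported "Opened". A short sketch of the client-side check that the table is actually usable once its regions are open; the calls are standard Admin API, the setup is illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceTableCheck {
  public static void main(String[] args) throws Exception {
    TableName ns = TableName.valueOf("hbase", "namespace");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // the "Region open journal" above corresponds to the table becoming reachable by clients
      System.out.println("exists:    " + admin.tableExists(ns));
      System.out.println("available: " + admin.isTableAvailable(ns));
    }
  }
}
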
2024-12-16T19:44:03,009 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=3893a032022fb38e1a88fc4547193141, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:03,024 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-16T19:44:03,034 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 3893a032022fb38e1a88fc4547193141, server=7a6e4bf70377,42741,1734378236851 in 226 msec 2024-12-16T19:44:03,045 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-16T19:44:03,045 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=3893a032022fb38e1a88fc4547193141, ASSIGN in 406 msec 2024-12-16T19:44:03,047 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T19:44:03,048 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378243048"}]},"ts":"1734378243048"} 2024-12-16T19:44:03,056 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-16T19:44:03,069 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T19:44:03,081 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.1730 sec 2024-12-16T19:44:03,142 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-16T19:44:03,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-16T19:44:03,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:44:03,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:44:03,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:44:03,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:44:03,230 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] 
procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-16T19:44:03,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-16T19:44:03,276 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 50 msec 2024-12-16T19:44:03,286 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-16T19:44:03,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-16T19:44:03,338 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 48 msec 2024-12-16T19:44:03,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-16T19:44:03,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-16T19:44:03,422 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 6.098sec 2024-12-16T19:44:03,430 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-16T19:44:03,432 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-16T19:44:03,434 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-16T19:44:03,435 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-16T19:44:03,435 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-16T19:44:03,437 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,45883,1734378235170-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-16T19:44:03,437 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,45883,1734378235170-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 
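
[Annotation, not part of the captured output] The 'default' and 'hbase' namespaces were just created by CreateNamespaceProcedure (pid=7 and pid=8), after which the master declared initialization complete. A sketch of the corresponding namespace administration calls from a client; the "demo_ns" namespace is hypothetical:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceAdminSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // should list at least 'default' and 'hbase', created above by pid=7 and pid=8
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
      // an application namespace goes through the same CreateNamespaceProcedure machinery
      admin.createNamespace(NamespaceDescriptor.create("demo_ns").build()); // hypothetical namespace
    }
  }
}
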
2024-12-16T19:44:03,465 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3301b7a5 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ab334d5 2024-12-16T19:44:03,466 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster$4(2389): Client=null/null create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-16T19:44:03,468 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-16T19:44:03,475 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:acl 2024-12-16T19:44:03,482 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T19:44:03,482 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:03,484 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] master.MasterRpcServices(713): Client=null/null procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 9 2024-12-16T19:44:03,489 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-16T19:44:03,491 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T19:44:03,510 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37fdf9c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:44:03,516 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-16T19:44:03,516 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-16T19:44:03,547 DEBUG [hconnection-0x304e9acc-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:03,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741839_1015 (size=349) 2024-12-16T19:44:03,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741839_1015 (size=349) 2024-12-16T19:44:03,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741839_1015 (size=349) 2024-12-16T19:44:03,579 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3e93dad405937162271b1d330fba4cbb, NAME => 
'hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:44:03,592 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52410, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:03,595 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-16T19:44:03,595 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=7a6e4bf70377,45883,1734378235170 2024-12-16T19:44:03,595 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2790): Starting mini mapreduce cluster... 2024-12-16T19:44:03,596 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/test.cache.data in system properties and HBase conf 2024-12-16T19:44:03,596 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.tmp.dir in system properties and HBase conf 2024-12-16T19:44:03,596 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.log.dir in system properties and HBase conf 2024-12-16T19:44:03,596 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-16T19:44:03,597 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-16T19:44:03,597 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-16T19:44:03,597 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-16T19:44:03,597 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 
2024-12-16T19:44:03,597 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-16T19:44:03,597 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-16T19:44:03,597 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-16T19:44:03,598 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-16T19:44:03,598 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-16T19:44:03,598 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-16T19:44:03,598 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-16T19:44:03,598 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/nfs.dump.dir in system properties and HBase conf 2024-12-16T19:44:03,598 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/java.io.tmpdir in system properties and HBase conf 2024-12-16T19:44:03,598 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-16T19:44:03,598 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-16T19:44:03,598 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-16T19:44:03,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741840_1016 (size=36) 2024-12-16T19:44:03,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741840_1016 (size=36) 2024-12-16T19:44:03,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741840_1016 (size=36) 2024-12-16T19:44:03,623 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:44:03,623 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1681): Closing 3e93dad405937162271b1d330fba4cbb, disabling compactions & flushes 2024-12-16T19:44:03,623 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb. 2024-12-16T19:44:03,624 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb. 2024-12-16T19:44:03,624 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb. after waiting 0 ms 2024-12-16T19:44:03,624 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb. 2024-12-16T19:44:03,624 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1922): Closed hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb. 2024-12-16T19:44:03,624 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3e93dad405937162271b1d330fba4cbb: 2024-12-16T19:44:03,630 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T19:44:03,631 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1734378243631"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378243631"}]},"ts":"1734378243631"} 2024-12-16T19:44:03,637 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
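
[Annotation, not part of the captured output] The HBaseTestingUtility records above ("Minicluster is up", "Starting mini mapreduce cluster...", followed by the hadoop/yarn/mapreduce/dfs directory properties) describe the standard test harness setup used by hbase-mapreduce tests. A minimal sketch of that lifecycle, assuming the hbase-testing-util test dependency is on the classpath:

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(3);          // 3 regionservers, matching the run logged above
    util.startMiniMapReduceCluster();  // sets the hadoop/yarn/mapreduce dirs shown in the records above
    try {
      // MapReduce-over-HBase test code would go here
    } finally {
      util.shutdownMiniMapReduceCluster();
      util.shutdownMiniCluster();
    }
  }
}
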
2024-12-16T19:44:03,644 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T19:44:03,645 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378243644"}]},"ts":"1734378243644"} 2024-12-16T19:44:03,649 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-16T19:44:03,703 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {7a6e4bf70377=0} racks are {/default-rack=0} 2024-12-16T19:44:03,706 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T19:44:03,706 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T19:44:03,706 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T19:44:03,706 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T19:44:03,706 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T19:44:03,710 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T19:44:03,710 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T19:44:03,710 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=3e93dad405937162271b1d330fba4cbb, ASSIGN}] 2024-12-16T19:44:03,728 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=3e93dad405937162271b1d330fba4cbb, ASSIGN 2024-12-16T19:44:03,734 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:acl, region=3e93dad405937162271b1d330fba4cbb, ASSIGN; state=OFFLINE, location=7a6e4bf70377,45217,1734378236605; forceNewPlan=false, retain=false 2024-12-16T19:44:03,795 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-16T19:44:03,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741841_1017 (size=592039) 2024-12-16T19:44:03,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741841_1017 (size=592039) 2024-12-16T19:44:03,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741841_1017 (size=592039) 2024-12-16T19:44:03,885 INFO [7a6e4bf70377:45883 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-16T19:44:03,886 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=3e93dad405937162271b1d330fba4cbb, regionState=OPENING, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:44:03,902 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 3e93dad405937162271b1d330fba4cbb, server=7a6e4bf70377,45217,1734378236605}] 2024-12-16T19:44:04,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741842_1018 (size=1663647) 2024-12-16T19:44:04,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741842_1018 (size=1663647) 2024-12-16T19:44:04,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741842_1018 (size=1663647) 2024-12-16T19:44:04,062 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:44:04,063 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-16T19:44:04,068 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33938, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-16T19:44:04,096 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-16T19:44:04,168 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(135): Open hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb. 2024-12-16T19:44:04,168 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 3e93dad405937162271b1d330fba4cbb, NAME => 'hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb.', STARTKEY => '', ENDKEY => ''} 2024-12-16T19:44:04,169 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb. service=AccessControlService 2024-12-16T19:44:04,169 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-16T19:44:04,170 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 3e93dad405937162271b1d330fba4cbb 2024-12-16T19:44:04,170 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(894): Instantiated hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:44:04,170 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 3e93dad405937162271b1d330fba4cbb 2024-12-16T19:44:04,170 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 3e93dad405937162271b1d330fba4cbb 2024-12-16T19:44:04,294 INFO [StoreOpener-3e93dad405937162271b1d330fba4cbb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 3e93dad405937162271b1d330fba4cbb 2024-12-16T19:44:04,298 INFO [StoreOpener-3e93dad405937162271b1d330fba4cbb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3e93dad405937162271b1d330fba4cbb columnFamilyName l 2024-12-16T19:44:04,298 DEBUG [StoreOpener-3e93dad405937162271b1d330fba4cbb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:04,306 INFO [StoreOpener-3e93dad405937162271b1d330fba4cbb-1 {}] regionserver.HStore(327): Store=3e93dad405937162271b1d330fba4cbb/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:44:04,310 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/acl/3e93dad405937162271b1d330fba4cbb 2024-12-16T19:44:04,311 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/acl/3e93dad405937162271b1d330fba4cbb 2024-12-16T19:44:04,330 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] 
regionserver.HRegion(1085): writing seq id for 3e93dad405937162271b1d330fba4cbb 2024-12-16T19:44:04,358 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/acl/3e93dad405937162271b1d330fba4cbb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:44:04,368 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1102): Opened 3e93dad405937162271b1d330fba4cbb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66692704, jitterRate=-0.006201267242431641}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:44:04,371 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 3e93dad405937162271b1d330fba4cbb: 2024-12-16T19:44:04,382 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb., pid=11, masterSystemTime=1734378244062 2024-12-16T19:44:04,393 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb. 2024-12-16T19:44:04,393 INFO [RS_OPEN_PRIORITY_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(164): Opened hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb. 
2024-12-16T19:44:04,397 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=3e93dad405937162271b1d330fba4cbb, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:44:04,445 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-16T19:44:04,445 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 3e93dad405937162271b1d330fba4cbb, server=7a6e4bf70377,45217,1734378236605 in 506 msec 2024-12-16T19:44:04,454 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-16T19:44:04,454 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=hbase:acl, region=3e93dad405937162271b1d330fba4cbb, ASSIGN in 735 msec 2024-12-16T19:44:04,457 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T19:44:04,457 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378244457"}]},"ts":"1734378244457"} 2024-12-16T19:44:04,473 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-16T19:44:04,511 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T19:44:04,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=hbase:acl in 1.0440 sec 2024-12-16T19:44:04,597 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-16T19:44:04,598 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: hbase:acl, procId: 9 completed 2024-12-16T19:44:04,642 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-16T19:44:04,643 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-16T19:44:04,644 INFO [master/7a6e4bf70377:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7a6e4bf70377,45883,1734378235170-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
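The assignment entries above show the master persisting region state into hbase:meta (regionState=OPEN, openSeqNum=2, regionLocation=...) and the table state into the table:state column. The same information can be read back with an ordinary client scan of hbase:meta; the column names used below (info:state, info:server) are inferred from the meta Puts visible in this log rather than from any documented contract, so treat this as a read-only peek sketch for debugging only:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaStatePeek {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      // Region rows for a table sort together in hbase:meta; restrict the scan to that prefix.
      Scan scan = new Scan().setRowPrefixFilter(Bytes.toBytes("hbase:acl,"));
      scan.addFamily(Bytes.toBytes("info"));
      try (ResultScanner rs = meta.getScanner(scan)) {
        for (Result r : rs) {
          byte[] state = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("state"));
          byte[] server = r.getValue(Bytes.toBytes("info"), Bytes.toBytes("server"));
          System.out.println(Bytes.toString(r.getRow())
              + " state=" + (state == null ? "?" : Bytes.toString(state))
              + " server=" + (server == null ? "?" : Bytes.toString(server)));
        }
      }
    }
  }
}
```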
2024-12-16T19:44:06,013 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-16T19:44:06,018 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-16T19:44:06,032 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-16T19:44:06,040 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:44:06,040 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-16T19:44:06,041 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-16T19:44:06,041 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-16T19:44:06,071 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:44:06,071 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-16T19:44:06,072 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-16T19:44:06,072 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-16T19:44:06,073 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-16T19:44:06,073 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-16T19:44:06,075 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-16T19:44:06,075 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-16T19:44:06,077 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:44:06,077 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-16T19:44:06,078 DEBUG 
[HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-16T19:44:06,078 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-16T19:44:06,078 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-16T19:44:06,078 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-16T19:44:06,521 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-nodemanager.properties,hadoop-metrics2.properties 2024-12-16T19:44:07,110 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T19:44:07,189 WARN [Thread-401 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T19:44:07,735 INFO [Thread-401 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T19:44:07,735 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 
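The metric registries above are created because org.apache.hadoop.hbase.security.access.AccessController is loaded at the master, region-server, and region coprocessor levels in this secure test cluster (the test utility also adds its own SecureTestUtil$MasterSyncObserver, which is not reproduced here). A minimal sketch of how that wiring is typically expressed, assuming the standard coprocessor configuration keys:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SecureTestConf {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Enable authorization and load AccessController at every coprocessor level,
    // which is what produces the three Coprocessor.* metric registries logged above.
    conf.setBoolean("hbase.security.authorization", true);
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    return conf;
  }
}
```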
2024-12-16T19:44:07,736 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T19:44:07,761 INFO [Thread-401 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T19:44:07,761 INFO [Thread-401 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T19:44:07,761 INFO [Thread-401 {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-16T19:44:07,766 INFO [Thread-401 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26c1ae3a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.log.dir/,AVAILABLE} 2024-12-16T19:44:07,766 INFO [Thread-401 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ce19e11{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-16T19:44:07,812 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T19:44:07,812 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T19:44:07,812 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-16T19:44:07,815 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T19:44:07,840 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@216a9072{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.log.dir/,AVAILABLE} 2024-12-16T19:44:07,841 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ff0711a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-16T19:44:08,018 INFO [Thread-401 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-12-16T19:44:08,019 INFO [Thread-401 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-12-16T19:44:08,019 INFO [Thread-401 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-16T19:44:08,021 INFO [Thread-401 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-16T19:44:08,260 INFO [Thread-401 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-16T19:44:08,841 INFO [Thread-401 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to 
GuiceManagedComponentProvider with the scope "Singleton" 2024-12-16T19:44:09,363 INFO [Thread-401 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-16T19:44:09,409 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@95ddd44{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/java.io.tmpdir/jetty-localhost-46585-hadoop-yarn-common-3_4_1_jar-_-any-2951022516745997296/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-16T19:44:09,409 INFO [Thread-401 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ea41eaa{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/java.io.tmpdir/jetty-localhost-40507-hadoop-yarn-common-3_4_1_jar-_-any-10529836570505775620/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-16T19:44:09,410 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@18f96a02{HTTP/1.1, (http/1.1)}{localhost:46585} 2024-12-16T19:44:09,410 INFO [Time-limited test {}] server.Server(415): Started @23946ms 2024-12-16T19:44:09,422 INFO [Thread-401 {}] server.AbstractConnector(333): Started ServerConnector@6c65d0ae{HTTP/1.1, (http/1.1)}{localhost:40507} 2024-12-16T19:44:09,422 INFO [Thread-401 {}] server.Server(415): Started @23959ms 2024-12-16T19:44:09,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741843_1019 (size=5) 2024-12-16T19:44:09,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741843_1019 (size=5) 2024-12-16T19:44:09,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741843_1019 (size=5) 2024-12-16T19:44:11,023 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-16T19:44:11,030 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T19:44:11,099 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 
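The repeated AuthenticationFilter warnings above are benign in a mini cluster: the HTTP auth filter cannot read /home/jenkins/hadoop-http-auth-signature-secret and falls back to a random signing secret. If the warning is unwanted, the usual remedy is to point hadoop.http.authentication.signature.secret.file at a readable file; the path below is only an illustrative choice, not anything taken from this log:

```java
import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.hadoop.conf.Configuration;

public class HttpAuthSecret {
  public static void main(String[] args) throws Exception {
    // Hypothetical secret file location; any readable file with some random content works.
    Path secret = Path.of("/tmp/hadoop-http-auth-signature-secret");
    if (!Files.exists(secret)) {
      Files.writeString(secret, java.util.UUID.randomUUID().toString());
    }
    Configuration conf = new Configuration();
    conf.set("hadoop.http.authentication.signature.secret.file", secret.toString());
    // Pass 'conf' into the mini-cluster / web app setup so AuthenticationFilter finds the file.
  }
}
```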
2024-12-16T19:44:11,100 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T19:44:11,104 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T19:44:11,104 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T19:44:11,104 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-16T19:44:11,110 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T19:44:11,111 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b0f1d9a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.log.dir/,AVAILABLE} 2024-12-16T19:44:11,111 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77ddd41a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-16T19:44:11,203 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-16T19:44:11,203 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-16T19:44:11,203 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-16T19:44:11,203 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-16T19:44:11,218 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-16T19:44:11,249 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-16T19:44:11,426 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-16T19:44:11,441 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@775e4a7b{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/java.io.tmpdir/jetty-localhost-37477-hadoop-yarn-common-3_4_1_jar-_-any-17934006111877753364/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-16T19:44:11,442 INFO [Time-limited test {}] server.AbstractConnector(333): Started 
ServerConnector@95a6b81{HTTP/1.1, (http/1.1)}{localhost:37477} 2024-12-16T19:44:11,443 INFO [Time-limited test {}] server.Server(415): Started @25979ms 2024-12-16T19:44:11,802 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-16T19:44:11,806 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T19:44:11,858 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-16T19:44:11,859 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-16T19:44:11,934 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-16T19:44:11,934 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-16T19:44:11,935 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-16T19:44:11,939 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-16T19:44:11,943 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44abea2f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.log.dir/,AVAILABLE} 2024-12-16T19:44:11,944 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5b8566ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-16T19:44:12,041 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-16T19:44:12,041 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-16T19:44:12,042 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-16T19:44:12,042 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-16T19:44:12,053 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-16T19:44:12,060 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to 
GuiceManagedComponentProvider with the scope "Singleton" 2024-12-16T19:44:12,183 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-16T19:44:12,188 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2d9dead1{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/java.io.tmpdir/jetty-localhost-33349-hadoop-yarn-common-3_4_1_jar-_-any-13197277736593776940/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-16T19:44:12,188 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@68244ab2{HTTP/1.1, (http/1.1)}{localhost:33349} 2024-12-16T19:44:12,188 INFO [Time-limited test {}] server.Server(415): Started @26725ms 2024-12-16T19:44:12,233 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2825): Mini mapreduce cluster started 2024-12-16T19:44:12,235 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:44:12,272 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=718, OpenFileDescriptor=775, MaxFileDescriptor=1048576, SystemLoadAverage=803, ProcessCount=11, AvailableMemoryMB=3945 2024-12-16T19:44:12,272 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=718 is superior to 500 2024-12-16T19:44:12,285 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-16T19:44:12,288 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55490, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-16T19:44:12,296 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T19:44:12,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-16T19:44:12,302 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T19:44:12,304 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 12 2024-12-16T19:44:12,306 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=12, 
state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T19:44:12,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-16T19:44:12,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741844_1020 (size=442) 2024-12-16T19:44:12,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741844_1020 (size=442) 2024-12-16T19:44:12,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741844_1020 (size=442) 2024-12-16T19:44:12,350 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 775d875c2b0f65030632e2caf55f9408, NAME => 'testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:44:12,350 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 413ac07087e3fd3ccce4c84d26ac31f5, NAME => 'testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:44:12,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741845_1021 (size=67) 2024-12-16T19:44:12,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741845_1021 (size=67) 2024-12-16T19:44:12,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741845_1021 (size=67) 2024-12-16T19:44:12,386 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:44:12,386 DEBUG 
[RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1681): Closing 775d875c2b0f65030632e2caf55f9408, disabling compactions & flushes 2024-12-16T19:44:12,386 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. 2024-12-16T19:44:12,386 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. 2024-12-16T19:44:12,386 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. after waiting 0 ms 2024-12-16T19:44:12,386 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. 2024-12-16T19:44:12,387 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. 2024-12-16T19:44:12,387 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1635): Region close journal for 775d875c2b0f65030632e2caf55f9408: 2024-12-16T19:44:12,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741846_1022 (size=67) 2024-12-16T19:44:12,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741846_1022 (size=67) 2024-12-16T19:44:12,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741846_1022 (size=67) 2024-12-16T19:44:12,395 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:44:12,395 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1681): Closing 413ac07087e3fd3ccce4c84d26ac31f5, disabling compactions & flushes 2024-12-16T19:44:12,395 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. 2024-12-16T19:44:12,395 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. 2024-12-16T19:44:12,395 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. after waiting 0 ms 2024-12-16T19:44:12,395 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. 
2024-12-16T19:44:12,395 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. 2024-12-16T19:44:12,395 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1635): Region close journal for 413ac07087e3fd3ccce4c84d26ac31f5: 2024-12-16T19:44:12,398 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T19:44:12,399 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1734378252399"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378252399"}]},"ts":"1734378252399"} 2024-12-16T19:44:12,399 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1734378252399"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378252399"}]},"ts":"1734378252399"} 2024-12-16T19:44:12,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-16T19:44:12,442 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T19:44:12,445 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T19:44:12,445 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378252445"}]},"ts":"1734378252445"} 2024-12-16T19:44:12,459 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-16T19:44:12,490 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {7a6e4bf70377=0} racks are {/default-rack=0} 2024-12-16T19:44:12,494 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T19:44:12,495 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T19:44:12,495 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T19:44:12,495 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T19:44:12,495 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T19:44:12,495 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T19:44:12,495 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T19:44:12,495 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=775d875c2b0f65030632e2caf55f9408, ASSIGN}, {pid=14, ppid=12, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=413ac07087e3fd3ccce4c84d26ac31f5, ASSIGN}] 2024-12-16T19:44:12,500 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=413ac07087e3fd3ccce4c84d26ac31f5, ASSIGN 2024-12-16T19:44:12,500 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=775d875c2b0f65030632e2caf55f9408, ASSIGN 2024-12-16T19:44:12,502 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=413ac07087e3fd3ccce4c84d26ac31f5, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42741,1734378236851; forceNewPlan=false, retain=false 2024-12-16T19:44:12,502 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=775d875c2b0f65030632e2caf55f9408, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42861,1734378237024; forceNewPlan=false, retain=false 2024-12-16T19:44:12,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-16T19:44:12,653 INFO [7a6e4bf70377:45883 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
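The shell-style descriptor logged for the create request above (pid=12) describes a table with a single MOB-enabled family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') pre-split into two regions at key '1'. The test's own client code is not visible in the log, so the following is only a sketch of an equivalent create using the standard Admin and descriptor-builder APIs:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Column family 'cf' with MOB enabled and threshold 0, matching the logged descriptor.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)
          .setMobThreshold(0L)
          .setMaxVersions(1)
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
          .setColumnFamily(cf)
          .build();
      // One split key ('1') produces the two regions seen in the log: ['', '1') and ['1', '').
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(table, splitKeys); // blocks until the CreateTableProcedure finishes
    }
  }
}
```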
2024-12-16T19:44:12,654 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=413ac07087e3fd3ccce4c84d26ac31f5, regionState=OPENING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:12,654 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=775d875c2b0f65030632e2caf55f9408, regionState=OPENING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:44:12,658 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=13, state=RUNNABLE; OpenRegionProcedure 775d875c2b0f65030632e2caf55f9408, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:44:12,660 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=16, ppid=14, state=RUNNABLE; OpenRegionProcedure 413ac07087e3fd3ccce4c84d26ac31f5, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:44:12,812 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:44:12,813 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-16T19:44:12,815 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:12,834 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34908, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-16T19:44:12,846 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. 2024-12-16T19:44:12,846 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7285): Opening region: {ENCODED => 775d875c2b0f65030632e2caf55f9408, NAME => 'testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T19:44:12,847 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. service=AccessControlService 2024-12-16T19:44:12,847 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:44:12,847 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. 
2024-12-16T19:44:12,847 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 775d875c2b0f65030632e2caf55f9408 2024-12-16T19:44:12,847 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:44:12,847 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7285): Opening region: {ENCODED => 413ac07087e3fd3ccce4c84d26ac31f5, NAME => 'testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T19:44:12,848 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7327): checking encryption for 775d875c2b0f65030632e2caf55f9408 2024-12-16T19:44:12,848 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7330): checking classloading for 775d875c2b0f65030632e2caf55f9408 2024-12-16T19:44:12,848 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. service=AccessControlService 2024-12-16T19:44:12,848 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-16T19:44:12,848 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 413ac07087e3fd3ccce4c84d26ac31f5 2024-12-16T19:44:12,848 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:44:12,848 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7327): checking encryption for 413ac07087e3fd3ccce4c84d26ac31f5 2024-12-16T19:44:12,848 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7330): checking classloading for 413ac07087e3fd3ccce4c84d26ac31f5 2024-12-16T19:44:12,852 INFO [StoreOpener-775d875c2b0f65030632e2caf55f9408-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 775d875c2b0f65030632e2caf55f9408 2024-12-16T19:44:12,858 INFO [StoreOpener-413ac07087e3fd3ccce4c84d26ac31f5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 413ac07087e3fd3ccce4c84d26ac31f5 2024-12-16T19:44:12,860 INFO [StoreOpener-775d875c2b0f65030632e2caf55f9408-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 775d875c2b0f65030632e2caf55f9408 columnFamilyName cf 2024-12-16T19:44:12,861 INFO [StoreOpener-413ac07087e3fd3ccce4c84d26ac31f5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 413ac07087e3fd3ccce4c84d26ac31f5 columnFamilyName cf 2024-12-16T19:44:12,865 DEBUG [StoreOpener-413ac07087e3fd3ccce4c84d26ac31f5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:12,865 DEBUG [StoreOpener-775d875c2b0f65030632e2caf55f9408-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:12,866 INFO [StoreOpener-413ac07087e3fd3ccce4c84d26ac31f5-1 {}] regionserver.HStore(327): Store=413ac07087e3fd3ccce4c84d26ac31f5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:44:12,866 INFO [StoreOpener-775d875c2b0f65030632e2caf55f9408-1 {}] regionserver.HStore(327): Store=775d875c2b0f65030632e2caf55f9408/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:44:12,868 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5 2024-12-16T19:44:12,868 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408 2024-12-16T19:44:12,868 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5 2024-12-16T19:44:12,869 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408 2024-12-16T19:44:12,871 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1085): writing seq id for 413ac07087e3fd3ccce4c84d26ac31f5 2024-12-16T19:44:12,872 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1085): writing seq id for 775d875c2b0f65030632e2caf55f9408 2024-12-16T19:44:12,874 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:44:12,875 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1102): Opened 413ac07087e3fd3ccce4c84d26ac31f5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63862793, jitterRate=-0.048370227217674255}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:44:12,876 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 
{event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1001): Region open journal for 413ac07087e3fd3ccce4c84d26ac31f5: 2024-12-16T19:44:12,877 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5., pid=16, masterSystemTime=1734378252815 2024-12-16T19:44:12,879 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:44:12,880 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1102): Opened 775d875c2b0f65030632e2caf55f9408; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66235688, jitterRate=-0.013011336326599121}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:44:12,880 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. 2024-12-16T19:44:12,880 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1001): Region open journal for 775d875c2b0f65030632e2caf55f9408: 2024-12-16T19:44:12,881 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. 2024-12-16T19:44:12,882 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=413ac07087e3fd3ccce4c84d26ac31f5, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:12,882 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408., pid=15, masterSystemTime=1734378252812 2024-12-16T19:44:12,888 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. 2024-12-16T19:44:12,888 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. 
2024-12-16T19:44:12,890 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=775d875c2b0f65030632e2caf55f9408, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:44:12,897 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=16, resume processing ppid=14 2024-12-16T19:44:12,901 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, ppid=14, state=SUCCESS; OpenRegionProcedure 413ac07087e3fd3ccce4c84d26ac31f5, server=7a6e4bf70377,42741,1734378236851 in 225 msec 2024-12-16T19:44:12,904 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=413ac07087e3fd3ccce4c84d26ac31f5, ASSIGN in 402 msec 2024-12-16T19:44:12,911 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=13 2024-12-16T19:44:12,911 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=13, state=SUCCESS; OpenRegionProcedure 775d875c2b0f65030632e2caf55f9408, server=7a6e4bf70377,42861,1734378237024 in 237 msec 2024-12-16T19:44:12,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-16T19:44:12,934 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-16T19:44:12,935 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=775d875c2b0f65030632e2caf55f9408, ASSIGN in 416 msec 2024-12-16T19:44:12,942 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T19:44:12,943 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378252942"}]},"ts":"1734378252942"} 2024-12-16T19:44:12,946 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-16T19:44:12,961 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T19:44:12,965 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-16T19:44:12,972 DEBUG [PEWorker-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:12,980 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41986, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:12,990 DEBUG [hconnection-0x7d447c75-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:12,992 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58548, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 
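CREATE_TABLE_POST_OPERATION above writes the table owner's permission (jenkins: RWXCA) into hbase:acl, and the ZooKeeper NodeCreated/NodeChildrenChanged events and ZKPermissionWatcher cache updates that follow show that change being pushed to each region server. The same kind of grant can also be issued explicitly from a client; a sketch using AccessControlClient, where a null family and qualifier mean the whole table:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePerms {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // RWXCA in the log corresponds to READ, WRITE, EXEC, CREATE, ADMIN for user 'jenkins'.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportWithTargetName"),
          "jenkins", null /* all families */, null /* all qualifiers */,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
```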
2024-12-16T19:44:13,000 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-16T19:44:13,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-16T19:44:13,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-16T19:44:13,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:44:13,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:44:13,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-16T19:44:13,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:44:13,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-16T19:44:13,046 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:44:13,081 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-16T19:44:13,085 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-16T19:44:13,085 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-16T19:44:13,091 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithTargetName in 786 msec 2024-12-16T19:44:13,092 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 
\x01 \x02 \x03 \x04 2024-12-16T19:44:13,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-16T19:44:13,427 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName, procId: 12 completed 2024-12-16T19:44:13,439 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithTargetName 2024-12-16T19:44:13,441 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. 2024-12-16T19:44:13,442 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:44:13,514 DEBUG [hconnection-0x304e9acc-shared-pool-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:13,517 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34910, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:13,545 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-16T19:44:13,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378253545 (current time:1734378253545). 2024-12-16T19:44:13,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:44:13,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-16T19:44:13,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:44:13,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c45310c to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@585e602c 2024-12-16T19:44:13,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21e04a0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:44:13,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:13,572 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58556, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:13,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c45310c to 127.0.0.1:62515 2024-12-16T19:44:13,575 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:44:13,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x452f03fe to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@43e5ed22 2024-12-16T19:44:13,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a255a96, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:44:13,618 DEBUG [hconnection-0x5db47a3e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:13,621 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58558, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:13,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:13,627 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41996, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:13,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x452f03fe to 127.0.0.1:62515 2024-12-16T19:44:13,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:44:13,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-16T19:44:13,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
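The entries above show the master handling the first snapshot request for this table: the TTL and VERSION defaults are filled in, the table ACL is re-read ([jenkins: RWXCA]), and the SnapshotManager finds no existing snapshot before scheduling the procedure. For context, a minimal client-side sketch of the kind of call that drives this path, assuming the standard HBase 2.x Admin API; the quorum address, snapshot name, and table name are taken from this run, everything else is illustrative and not part of the test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ZooKeeper quorum as seen in this log (127.0.0.1:62515); adjust for a real cluster.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "62515");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // For an enabled table this takes a flush snapshot, matching "type=FLUSH ttl=0" above.
      admin.snapshot("emptySnaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"));
    }
  }
}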
2024-12-16T19:44:13,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=17, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-16T19:44:13,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17 2024-12-16T19:44:13,673 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:44:13,680 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:44:13,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-16T19:44:13,698 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:44:13,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741847_1023 (size=167) 2024-12-16T19:44:13,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741847_1023 (size=167) 2024-12-16T19:44:13,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741847_1023 (size=167) 2024-12-16T19:44:13,741 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:44:13,744 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 775d875c2b0f65030632e2caf55f9408}, {pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 413ac07087e3fd3ccce4c84d26ac31f5}] 2024-12-16T19:44:13,749 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 775d875c2b0f65030632e2caf55f9408 2024-12-16T19:44:13,749 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 413ac07087e3fd3ccce4c84d26ac31f5 2024-12-16T19:44:13,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=17 2024-12-16T19:44:13,910 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:13,911 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:44:13,912 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42741 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=19 2024-12-16T19:44:13,917 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=18 2024-12-16T19:44:13,918 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. 2024-12-16T19:44:13,921 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 413ac07087e3fd3ccce4c84d26ac31f5: 2024-12-16T19:44:13,921 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. for emptySnaptb0-testExportWithTargetName completed. 2024-12-16T19:44:13,921 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. 2024-12-16T19:44:13,922 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.HRegion(2538): Flush status journal for 775d875c2b0f65030632e2caf55f9408: 2024-12-16T19:44:13,923 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. for emptySnaptb0-testExportWithTargetName completed. 2024-12-16T19:44:13,925 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-16T19:44:13,925 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-16T19:44:13,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:44:13,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:44:13,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T19:44:13,936 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T19:44:13,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-16T19:44:14,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741849_1025 (size=70) 2024-12-16T19:44:14,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741849_1025 (size=70) 2024-12-16T19:44:14,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741849_1025 (size=70) 2024-12-16T19:44:14,084 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. 
2024-12-16T19:44:14,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=18 2024-12-16T19:44:14,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=18 2024-12-16T19:44:14,091 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 775d875c2b0f65030632e2caf55f9408 2024-12-16T19:44:14,092 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 775d875c2b0f65030632e2caf55f9408 2024-12-16T19:44:14,097 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, ppid=17, state=SUCCESS; SnapshotRegionProcedure 775d875c2b0f65030632e2caf55f9408 in 351 msec 2024-12-16T19:44:14,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741848_1024 (size=70) 2024-12-16T19:44:14,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741848_1024 (size=70) 2024-12-16T19:44:14,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741848_1024 (size=70) 2024-12-16T19:44:14,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. 2024-12-16T19:44:14,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-16T19:44:14,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-16T19:44:14,121 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 413ac07087e3fd3ccce4c84d26ac31f5 2024-12-16T19:44:14,121 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 413ac07087e3fd3ccce4c84d26ac31f5 2024-12-16T19:44:14,128 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=17 2024-12-16T19:44:14,128 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=17, state=SUCCESS; SnapshotRegionProcedure 413ac07087e3fd3ccce4c84d26ac31f5 in 380 msec 2024-12-16T19:44:14,128 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:44:14,131 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:44:14,133 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-16T19:44:14,134 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:44:14,134 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:14,135 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-16T19:44:14,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741850_1026 (size=62) 2024-12-16T19:44:14,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741850_1026 (size=62) 2024-12-16T19:44:14,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741850_1026 (size=62) 2024-12-16T19:44:14,150 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:44:14,150 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-16T19:44:14,153 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-16T19:44:14,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741851_1027 (size=649) 2024-12-16T19:44:14,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741851_1027 (size=649) 2024-12-16T19:44:14,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741851_1027 (size=649) 2024-12-16T19:44:14,184 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:44:14,201 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:44:14,204 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-16T19:44:14,208 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:44:14,208 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17 2024-12-16T19:44:14,213 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 548 msec 2024-12-16T19:44:14,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-16T19:44:14,290 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 17 completed 2024-12-16T19:44:14,312 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42861 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:44:14,313 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42741 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:44:14,324 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithTargetName 2024-12-16T19:44:14,324 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. 2024-12-16T19:44:14,325 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:44:14,359 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-16T19:44:14,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378254359 (current time:1734378254359). 
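The HRegion(8254) warnings above come from test writes issued with the write-ahead log turned off, which is why the log notes that data may be lost on a crash. A short sketch of a client put configured that way, assuming the standard Put/Durability API; the row key and value are made up, while the table and family name (cf) match this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
      Put put = new Put(Bytes.toBytes("row-0001"));  // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);        // skipping the WAL triggers the warning above
      table.put(put);
    }
  }
}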
2024-12-16T19:44:14,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:44:14,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-16T19:44:14,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:44:14,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x57ea8bfc to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6b66464f 2024-12-16T19:44:14,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@481b07ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:44:14,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:14,425 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58572, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:14,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x57ea8bfc to 127.0.0.1:62515 2024-12-16T19:44:14,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:44:14,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5133649b to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8be14cc 2024-12-16T19:44:14,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1888e4b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:44:14,490 DEBUG [hconnection-0x6770b03b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:14,492 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58576, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:14,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:14,498 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42008, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:14,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x5133649b to 127.0.0.1:62515 2024-12-16T19:44:14,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:44:14,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-16T19:44:14,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T19:44:14,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-16T19:44:14,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20 2024-12-16T19:44:14,512 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:44:14,514 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:44:14,521 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:44:14,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-16T19:44:14,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741852_1028 (size=162) 2024-12-16T19:44:14,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-16T19:44:14,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741852_1028 (size=162) 2024-12-16T19:44:14,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741852_1028 (size=162) 2024-12-16T19:44:14,676 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:44:14,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, 
state=RUNNABLE; SnapshotRegionProcedure 775d875c2b0f65030632e2caf55f9408}, {pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 413ac07087e3fd3ccce4c84d26ac31f5}] 2024-12-16T19:44:14,679 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 413ac07087e3fd3ccce4c84d26ac31f5 2024-12-16T19:44:14,685 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 775d875c2b0f65030632e2caf55f9408 2024-12-16T19:44:14,834 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:14,835 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42741 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=22 2024-12-16T19:44:14,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-16T19:44:14,843 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. 2024-12-16T19:44:14,844 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2837): Flushing 413ac07087e3fd3ccce4c84d26ac31f5 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-16T19:44:14,858 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:44:14,860 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=21 2024-12-16T19:44:14,862 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. 
2024-12-16T19:44:14,863 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 775d875c2b0f65030632e2caf55f9408 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-16T19:44:14,990 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412161895301c6a0f4855a16f0e01d6d4689d_413ac07087e3fd3ccce4c84d26ac31f5 is 71, key is 14d41d835171bd4f76347239e9a557ae/cf:q/1734378254313/Put/seqid=0 2024-12-16T19:44:14,988 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216c36b307cbf464286b7c03ca181382597_775d875c2b0f65030632e2caf55f9408 is 71, key is 005296d7445a5cd6a0a5684b25fb117d/cf:q/1734378254312/Put/seqid=0 2024-12-16T19:44:15,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741853_1029 (size=5311) 2024-12-16T19:44:15,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741853_1029 (size=5311) 2024-12-16T19:44:15,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741853_1029 (size=5311) 2024-12-16T19:44:15,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:15,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741854_1030 (size=7961) 2024-12-16T19:44:15,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-16T19:44:15,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741854_1030 (size=7961) 2024-12-16T19:44:15,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741854_1030 (size=7961) 2024-12-16T19:44:15,151 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:15,275 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216c36b307cbf464286b7c03ca181382597_775d875c2b0f65030632e2caf55f9408 to 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241216c36b307cbf464286b7c03ca181382597_775d875c2b0f65030632e2caf55f9408 2024-12-16T19:44:15,275 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412161895301c6a0f4855a16f0e01d6d4689d_413ac07087e3fd3ccce4c84d26ac31f5 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202412161895301c6a0f4855a16f0e01d6d4689d_413ac07087e3fd3ccce4c84d26ac31f5 2024-12-16T19:44:15,278 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408/.tmp/cf/a9b0987e47454d788d31e05132688e87, store: [table=testtb-testExportWithTargetName family=cf region=775d875c2b0f65030632e2caf55f9408] 2024-12-16T19:44:15,281 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5/.tmp/cf/4d7e5f647da844df8b2ed538889921d5, store: [table=testtb-testExportWithTargetName family=cf region=413ac07087e3fd3ccce4c84d26ac31f5] 2024-12-16T19:44:15,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5/.tmp/cf/4d7e5f647da844df8b2ed538889921d5 is 208, key is 1cee6cb38d7d21293053cde46b5a91c04/cf:q/1734378254313/Put/seqid=0 2024-12-16T19:44:15,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408/.tmp/cf/a9b0987e47454d788d31e05132688e87 is 208, key is 0486d4adf8977141e51e56dac371b3b94/cf:q/1734378254312/Put/seqid=0 2024-12-16T19:44:15,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741856_1032 (size=6524) 2024-12-16T19:44:15,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741856_1032 (size=6524) 2024-12-16T19:44:15,422 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=400, hasBloomFilter=true, into tmp file 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408/.tmp/cf/a9b0987e47454d788d31e05132688e87 2024-12-16T19:44:15,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741856_1032 (size=6524) 2024-12-16T19:44:15,443 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408/.tmp/cf/a9b0987e47454d788d31e05132688e87 as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408/cf/a9b0987e47454d788d31e05132688e87 2024-12-16T19:44:15,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741855_1031 (size=14337) 2024-12-16T19:44:15,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741855_1031 (size=14337) 2024-12-16T19:44:15,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741855_1031 (size=14337) 2024-12-16T19:44:15,458 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5/.tmp/cf/4d7e5f647da844df8b2ed538889921d5 2024-12-16T19:44:15,473 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408/cf/a9b0987e47454d788d31e05132688e87, entries=6, sequenceid=6, filesize=6.4 K 2024-12-16T19:44:15,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5/.tmp/cf/4d7e5f647da844df8b2ed538889921d5 as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5/cf/4d7e5f647da844df8b2ed538889921d5 2024-12-16T19:44:15,508 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for 775d875c2b0f65030632e2caf55f9408 in 644ms, sequenceid=6, compaction requested=false 2024-12-16T19:44:15,508 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-16T19:44:15,509 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 775d875c2b0f65030632e2caf55f9408: 2024-12-16T19:44:15,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. for snaptb0-testExportWithTargetName completed. 2024-12-16T19:44:15,511 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-16T19:44:15,511 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:44:15,511 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408/cf/a9b0987e47454d788d31e05132688e87] hfiles 2024-12-16T19:44:15,511 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408/cf/a9b0987e47454d788d31e05132688e87 for snapshot=snaptb0-testExportWithTargetName 2024-12-16T19:44:15,527 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5/cf/4d7e5f647da844df8b2ed538889921d5, entries=44, sequenceid=6, filesize=14.0 K 2024-12-16T19:44:15,530 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(3040): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for 413ac07087e3fd3ccce4c84d26ac31f5 in 687ms, sequenceid=6, compaction requested=false 2024-12-16T19:44:15,530 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2538): Flush status journal for 413ac07087e3fd3ccce4c84d26ac31f5: 2024-12-16T19:44:15,531 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. for snaptb0-testExportWithTargetName completed. 2024-12-16T19:44:15,531 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-16T19:44:15,531 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:44:15,531 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5/cf/4d7e5f647da844df8b2ed538889921d5] hfiles 2024-12-16T19:44:15,531 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5/cf/4d7e5f647da844df8b2ed538889921d5 for snapshot=snaptb0-testExportWithTargetName 2024-12-16T19:44:15,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741858_1034 (size=109) 2024-12-16T19:44:15,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741858_1034 (size=109) 2024-12-16T19:44:15,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741858_1034 (size=109) 2024-12-16T19:44:15,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. 
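In the entries above each SnapshotRegionCallable records its region's hfile references into the snapshot manifest and then closes; the master-side entries that follow mark the subprocedures finished and consolidate the manifest, after which the snapshot is visible to clients. A minimal client-side check, assuming the standard Admin API; this is illustrative only, not code from the test.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      List<SnapshotDescription> snapshots = admin.listSnapshots();
      for (SnapshotDescription s : snapshots) {
        // Expect emptySnaptb0-testExportWithTargetName and snaptb0-testExportWithTargetName here.
        System.out.println(s.getName());
      }
    }
  }
}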
2024-12-16T19:44:15,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=22 2024-12-16T19:44:15,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=22 2024-12-16T19:44:15,641 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 413ac07087e3fd3ccce4c84d26ac31f5 2024-12-16T19:44:15,641 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 413ac07087e3fd3ccce4c84d26ac31f5 2024-12-16T19:44:15,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741857_1033 (size=109) 2024-12-16T19:44:15,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741857_1033 (size=109) 2024-12-16T19:44:15,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-16T19:44:15,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741857_1033 (size=109) 2024-12-16T19:44:15,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. 2024-12-16T19:44:15,654 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-16T19:44:15,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-16T19:44:15,655 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 775d875c2b0f65030632e2caf55f9408 2024-12-16T19:44:15,655 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 775d875c2b0f65030632e2caf55f9408 2024-12-16T19:44:15,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, ppid=20, state=SUCCESS; SnapshotRegionProcedure 413ac07087e3fd3ccce4c84d26ac31f5 in 975 msec 2024-12-16T19:44:15,676 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-16T19:44:15,676 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; SnapshotRegionProcedure 775d875c2b0f65030632e2caf55f9408 in 988 msec 2024-12-16T19:44:15,677 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:44:15,681 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:44:15,711 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-16T19:44:15,711 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:44:15,711 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:15,718 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202412161895301c6a0f4855a16f0e01d6d4689d_413ac07087e3fd3ccce4c84d26ac31f5, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241216c36b307cbf464286b7c03ca181382597_775d875c2b0f65030632e2caf55f9408] hfiles 2024-12-16T19:44:15,719 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202412161895301c6a0f4855a16f0e01d6d4689d_413ac07087e3fd3ccce4c84d26ac31f5 2024-12-16T19:44:15,719 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241216c36b307cbf464286b7c03ca181382597_775d875c2b0f65030632e2caf55f9408 2024-12-16T19:44:15,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741859_1035 (size=293) 2024-12-16T19:44:15,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741859_1035 (size=293) 2024-12-16T19:44:15,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741859_1035 (size=293) 2024-12-16T19:44:15,800 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:44:15,800 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-16T19:44:15,802 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-16T19:44:15,938 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741860_1036 (size=959) 2024-12-16T19:44:15,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741860_1036 (size=959) 2024-12-16T19:44:15,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741860_1036 (size=959) 2024-12-16T19:44:15,942 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:44:15,958 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:44:15,962 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-16T19:44:15,966 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:44:15,966 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20 2024-12-16T19:44:15,969 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 1.4600 sec 2024-12-16T19:44:16,039 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-16T19:44:16,039 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-16T19:44:16,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-16T19:44:16,657 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 20 completed 2024-12-16T19:44:16,657 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378256657 2024-12-16T19:44:16,658 INFO [Time-limited test {}] 
snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:38709, tgtDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378256657, rawTgtDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378256657, srcFsUri=hdfs://localhost:38709, srcDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:44:16,794 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:38709, inputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:44:16,794 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378256657, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378256657/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-16T19:44:16,802 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T19:44:16,859 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378256657/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-16T19:44:16,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741861_1037 (size=162) 2024-12-16T19:44:16,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741861_1037 (size=162) 2024-12-16T19:44:16,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741861_1037 (size=162) 2024-12-16T19:44:16,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741862_1038 (size=959) 2024-12-16T19:44:16,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741862_1038 (size=959) 2024-12-16T19:44:16,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741862_1038 (size=959) 2024-12-16T19:44:17,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741863_1039 (size=154) 2024-12-16T19:44:17,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741863_1039 (size=154) 2024-12-16T19:44:17,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741863_1039 (size=154) 2024-12-16T19:44:17,378 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 
2024-12-16T19:44:17,378 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:17,379 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:17,380 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:18,400 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T19:44:18,530 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-14002362915615111437.jar 2024-12-16T19:44:18,531 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:18,532 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:18,621 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-3090008321647162937.jar 2024-12-16T19:44:18,622 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:18,623 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:18,624 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:18,624 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:18,625 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:18,626 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:18,630 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T19:44:18,631 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T19:44:18,632 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T19:44:18,633 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T19:44:18,633 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-16T19:44:18,635 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T19:44:18,636 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T19:44:18,637 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T19:44:18,637 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T19:44:18,638 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T19:44:18,638 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T19:44:18,640 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T19:44:18,644 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:44:18,645 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:44:18,646 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:44:18,647 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:44:18,649 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:44:18,652 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:44:18,652 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:44:18,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741864_1040 (size=127628) 2024-12-16T19:44:18,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741864_1040 (size=127628) 2024-12-16T19:44:18,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741864_1040 (size=127628) 2024-12-16T19:44:19,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741865_1041 (size=2172137) 2024-12-16T19:44:19,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40033 is added to blk_1073741865_1041 (size=2172137) 2024-12-16T19:44:19,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741865_1041 (size=2172137) 2024-12-16T19:44:19,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741866_1042 (size=213228) 2024-12-16T19:44:19,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741866_1042 (size=213228) 2024-12-16T19:44:19,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741866_1042 (size=213228) 2024-12-16T19:44:19,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741867_1043 (size=1877034) 2024-12-16T19:44:19,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741867_1043 (size=1877034) 2024-12-16T19:44:19,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741867_1043 (size=1877034) 2024-12-16T19:44:19,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741868_1044 (size=533455) 2024-12-16T19:44:19,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741868_1044 (size=533455) 2024-12-16T19:44:19,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741868_1044 (size=533455) 2024-12-16T19:44:19,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741869_1045 (size=7280644) 2024-12-16T19:44:19,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741869_1045 (size=7280644) 2024-12-16T19:44:19,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741869_1045 (size=7280644) 2024-12-16T19:44:19,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741870_1046 (size=4188619) 2024-12-16T19:44:19,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741870_1046 (size=4188619) 2024-12-16T19:44:19,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741870_1046 (size=4188619) 2024-12-16T19:44:19,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741871_1047 (size=20406) 2024-12-16T19:44:19,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741871_1047 (size=20406) 2024-12-16T19:44:19,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741871_1047 (size=20406) 2024-12-16T19:44:19,613 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741872_1048 (size=75495) 2024-12-16T19:44:19,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741872_1048 (size=75495) 2024-12-16T19:44:19,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741872_1048 (size=75495) 2024-12-16T19:44:19,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741873_1049 (size=45609) 2024-12-16T19:44:19,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741873_1049 (size=45609) 2024-12-16T19:44:19,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741873_1049 (size=45609) 2024-12-16T19:44:19,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741874_1050 (size=110084) 2024-12-16T19:44:19,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741874_1050 (size=110084) 2024-12-16T19:44:19,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741874_1050 (size=110084) 2024-12-16T19:44:19,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741875_1051 (size=1323991) 2024-12-16T19:44:19,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741875_1051 (size=1323991) 2024-12-16T19:44:19,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741875_1051 (size=1323991) 2024-12-16T19:44:19,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741876_1052 (size=23076) 2024-12-16T19:44:19,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741876_1052 (size=23076) 2024-12-16T19:44:19,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741876_1052 (size=23076) 2024-12-16T19:44:19,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741877_1053 (size=126803) 2024-12-16T19:44:19,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741877_1053 (size=126803) 2024-12-16T19:44:19,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741877_1053 (size=126803) 2024-12-16T19:44:19,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741878_1054 (size=322274) 2024-12-16T19:44:19,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741878_1054 (size=322274) 2024-12-16T19:44:19,740 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741878_1054 (size=322274) 2024-12-16T19:44:19,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741879_1055 (size=1832290) 2024-12-16T19:44:19,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741879_1055 (size=1832290) 2024-12-16T19:44:19,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741879_1055 (size=1832290) 2024-12-16T19:44:19,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741880_1056 (size=30081) 2024-12-16T19:44:19,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741880_1056 (size=30081) 2024-12-16T19:44:19,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741880_1056 (size=30081) 2024-12-16T19:44:19,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741881_1057 (size=53616) 2024-12-16T19:44:19,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741881_1057 (size=53616) 2024-12-16T19:44:19,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741881_1057 (size=53616) 2024-12-16T19:44:19,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741882_1058 (size=6350922) 2024-12-16T19:44:19,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741882_1058 (size=6350922) 2024-12-16T19:44:19,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741882_1058 (size=6350922) 2024-12-16T19:44:19,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741883_1059 (size=29229) 2024-12-16T19:44:19,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741883_1059 (size=29229) 2024-12-16T19:44:19,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741883_1059 (size=29229) 2024-12-16T19:44:19,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741884_1060 (size=169089) 2024-12-16T19:44:19,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741884_1060 (size=169089) 2024-12-16T19:44:19,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741884_1060 (size=169089) 2024-12-16T19:44:19,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741885_1061 (size=451756) 2024-12-16T19:44:19,999 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741885_1061 (size=451756) 2024-12-16T19:44:19,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741885_1061 (size=451756) 2024-12-16T19:44:20,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741886_1062 (size=5175431) 2024-12-16T19:44:20,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741886_1062 (size=5175431) 2024-12-16T19:44:20,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741886_1062 (size=5175431) 2024-12-16T19:44:20,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741887_1063 (size=136454) 2024-12-16T19:44:20,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741887_1063 (size=136454) 2024-12-16T19:44:20,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741887_1063 (size=136454) 2024-12-16T19:44:20,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741888_1064 (size=907467) 2024-12-16T19:44:20,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741888_1064 (size=907467) 2024-12-16T19:44:20,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741888_1064 (size=907467) 2024-12-16T19:44:20,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741889_1065 (size=3317408) 2024-12-16T19:44:20,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741889_1065 (size=3317408) 2024-12-16T19:44:20,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741889_1065 (size=3317408) 2024-12-16T19:44:20,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741890_1066 (size=503880) 2024-12-16T19:44:20,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741890_1066 (size=503880) 2024-12-16T19:44:20,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741890_1066 (size=503880) 2024-12-16T19:44:20,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741891_1067 (size=4695811) 2024-12-16T19:44:20,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741891_1067 (size=4695811) 2024-12-16T19:44:20,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741891_1067 (size=4695811) 2024-12-16T19:44:20,432 WARN 
[Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-16T19:44:20,457 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-16T19:44:20,465 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=33.3 K 2024-12-16T19:44:20,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741892_1068 (size=722) 2024-12-16T19:44:20,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741892_1068 (size=722) 2024-12-16T19:44:20,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741892_1068 (size=722) 2024-12-16T19:44:20,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741893_1069 (size=15) 2024-12-16T19:44:20,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741893_1069 (size=15) 2024-12-16T19:44:20,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741893_1069 (size=15) 2024-12-16T19:44:20,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741894_1070 (size=304884) 2024-12-16T19:44:20,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741894_1070 (size=304884) 2024-12-16T19:44:20,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741894_1070 (size=304884) 2024-12-16T19:44:21,241 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T19:44:21,241 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T19:44:21,791 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0001_000001 (auth:SIMPLE) from 127.0.0.1:43952 2024-12-16T19:44:24,254 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-16T19:44:29,549 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-16T19:44:29,552 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58294, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-16T19:44:33,547 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-16T19:44:33,550 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58304, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-16T19:44:33,551 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-16T19:44:33,556 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58310, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-16T19:44:34,579 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0001_000001 (auth:SIMPLE) from 127.0.0.1:54204 2024-12-16T19:44:35,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741895_1071 (size=350558) 2024-12-16T19:44:35,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741895_1071 (size=350558) 2024-12-16T19:44:35,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741895_1071 (size=350558) 2024-12-16T19:44:37,042 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0001_000001 (auth:SIMPLE) from 127.0.0.1:34424 2024-12-16T19:44:44,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741896_1072 (size=14337) 2024-12-16T19:44:44,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741896_1072 (size=14337) 2024-12-16T19:44:44,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741896_1072 (size=14337) 2024-12-16T19:44:44,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741897_1073 (size=7961) 2024-12-16T19:44:44,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741897_1073 (size=7961) 2024-12-16T19:44:44,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741897_1073 (size=7961) 2024-12-16T19:44:44,645 INFO [master/7a6e4bf70377:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-16T19:44:44,645 INFO [master/7a6e4bf70377:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-16T19:44:44,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741898_1074 (size=6524) 2024-12-16T19:44:44,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741898_1074 (size=6524) 2024-12-16T19:44:44,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741898_1074 (size=6524) 2024-12-16T19:44:45,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741899_1075 (size=5311) 2024-12-16T19:44:45,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741899_1075 (size=5311) 2024-12-16T19:44:45,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741899_1075 (size=5311) 2024-12-16T19:44:45,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741900_1076 (size=17465) 2024-12-16T19:44:45,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741900_1076 (size=17465) 2024-12-16T19:44:45,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741900_1076 (size=17465) 2024-12-16T19:44:45,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741901_1077 (size=464) 2024-12-16T19:44:45,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741901_1077 (size=464) 2024-12-16T19:44:45,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741901_1077 (size=464) 2024-12-16T19:44:45,322 WARN [ContainersLauncher #0 {}] launcher.ContainerCleanup(157): Exception when trying to cleanup container container_1734378249643_0001_01_000002: java.io.FileNotFoundException: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_0/nmPrivate/application_1734378249643_0001/container_1734378249643_0001_01_000002/container_1734378249643_0001_01_000002.pid (No such file or directory) at java.base/java.io.FileInputStream.open0(Native Method) at java.base/java.io.FileInputStream.open(FileInputStream.java:216) at java.base/java.io.FileInputStream.<init>(FileInputStream.java:157) at org.apache.hadoop.yarn.server.nodemanager.util.ProcessIdFileReader.getProcessId(ProcessIdFileReader.java:59) at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.getContainerPid(ContainerLaunch.java:1050) at org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerCleanup.run(ContainerCleanup.java:119) at java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) at 
java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-16T19:44:45,336 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_3/usercache/jenkins/appcache/application_1734378249643_0001/container_1734378249643_0001_01_000002/launch_container.sh] 2024-12-16T19:44:45,336 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_3/usercache/jenkins/appcache/application_1734378249643_0001/container_1734378249643_0001_01_000002/container_tokens] 2024-12-16T19:44:45,337 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_3/usercache/jenkins/appcache/application_1734378249643_0001/container_1734378249643_0001_01_000002/sysfs] 2024-12-16T19:44:45,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741902_1078 (size=17465) 2024-12-16T19:44:45,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741902_1078 (size=17465) 2024-12-16T19:44:45,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741902_1078 (size=17465) 2024-12-16T19:44:45,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741903_1079 (size=350558) 2024-12-16T19:44:45,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741903_1079 (size=350558) 2024-12-16T19:44:45,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741903_1079 (size=350558) 2024-12-16T19:44:45,427 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0001_000001 (auth:SIMPLE) from 127.0.0.1:34442 2024-12-16T19:44:46,757 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-16T19:44:46,758 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
2024-12-16T19:44:46,790 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: testExportWithTargetName 2024-12-16T19:44:46,790 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-16T19:44:46,791 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-16T19:44:46,791 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-16T19:44:46,792 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-16T19:44:46,792 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-16T19:44:46,792 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378256657/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378256657/.hbase-snapshot/testExportWithTargetName 2024-12-16T19:44:46,793 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378256657/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-16T19:44:46,793 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378256657/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-16T19:44:46,809 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithTargetName 2024-12-16T19:44:46,813 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-16T19:44:46,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=23, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-16T19:44:46,829 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378286829"}]},"ts":"1734378286829"} 2024-12-16T19:44:46,832 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-16T19:44:46,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-16T19:44:46,842 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set 
testtb-testExportWithTargetName to state=DISABLING 2024-12-16T19:44:46,846 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=24, ppid=23, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-16T19:44:46,860 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=775d875c2b0f65030632e2caf55f9408, UNASSIGN}, {pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=413ac07087e3fd3ccce4c84d26ac31f5, UNASSIGN}] 2024-12-16T19:44:46,862 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=775d875c2b0f65030632e2caf55f9408, UNASSIGN 2024-12-16T19:44:46,864 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=413ac07087e3fd3ccce4c84d26ac31f5, UNASSIGN 2024-12-16T19:44:46,864 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=775d875c2b0f65030632e2caf55f9408, regionState=CLOSING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:44:46,866 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=413ac07087e3fd3ccce4c84d26ac31f5, regionState=CLOSING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:46,867 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:44:46,868 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=25, state=RUNNABLE; CloseRegionProcedure 775d875c2b0f65030632e2caf55f9408, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:44:46,871 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:44:46,872 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=28, ppid=26, state=RUNNABLE; CloseRegionProcedure 413ac07087e3fd3ccce4c84d26ac31f5, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:44:46,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-16T19:44:47,034 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:44:47,041 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(124): Close 775d875c2b0f65030632e2caf55f9408 2024-12-16T19:44:47,041 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:44:47,042 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:47,042 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] 
regionserver.HRegion(1681): Closing 775d875c2b0f65030632e2caf55f9408, disabling compactions & flushes 2024-12-16T19:44:47,042 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. 2024-12-16T19:44:47,042 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. 2024-12-16T19:44:47,042 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. after waiting 0 ms 2024-12-16T19:44:47,043 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. 2024-12-16T19:44:47,054 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(124): Close 413ac07087e3fd3ccce4c84d26ac31f5 2024-12-16T19:44:47,054 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:44:47,055 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1681): Closing 413ac07087e3fd3ccce4c84d26ac31f5, disabling compactions & flushes 2024-12-16T19:44:47,055 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. 2024-12-16T19:44:47,055 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. 2024-12-16T19:44:47,055 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. after waiting 0 ms 2024-12-16T19:44:47,055 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. 
2024-12-16T19:44:47,079 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:44:47,086 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:44:47,087 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:44:47,087 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408. 2024-12-16T19:44:47,087 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1635): Region close journal for 775d875c2b0f65030632e2caf55f9408: 2024-12-16T19:44:47,088 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:44:47,088 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5. 
2024-12-16T19:44:47,088 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1635): Region close journal for 413ac07087e3fd3ccce4c84d26ac31f5: 2024-12-16T19:44:47,090 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(170): Closed 775d875c2b0f65030632e2caf55f9408 2024-12-16T19:44:47,091 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=775d875c2b0f65030632e2caf55f9408, regionState=CLOSED 2024-12-16T19:44:47,092 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(170): Closed 413ac07087e3fd3ccce4c84d26ac31f5 2024-12-16T19:44:47,094 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=413ac07087e3fd3ccce4c84d26ac31f5, regionState=CLOSED 2024-12-16T19:44:47,099 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=25 2024-12-16T19:44:47,099 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=25, state=SUCCESS; CloseRegionProcedure 775d875c2b0f65030632e2caf55f9408, server=7a6e4bf70377,42861,1734378237024 in 226 msec 2024-12-16T19:44:47,101 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=28, resume processing ppid=26 2024-12-16T19:44:47,101 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, ppid=26, state=SUCCESS; CloseRegionProcedure 413ac07087e3fd3ccce4c84d26ac31f5, server=7a6e4bf70377,42741,1734378236851 in 226 msec 2024-12-16T19:44:47,103 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=775d875c2b0f65030632e2caf55f9408, UNASSIGN in 241 msec 2024-12-16T19:44:47,108 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=26, resume processing ppid=24 2024-12-16T19:44:47,108 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=413ac07087e3fd3ccce4c84d26ac31f5, UNASSIGN in 242 msec 2024-12-16T19:44:47,112 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=24, resume processing ppid=23 2024-12-16T19:44:47,113 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, ppid=23, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 264 msec 2024-12-16T19:44:47,117 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378287117"}]},"ts":"1734378287117"} 2024-12-16T19:44:47,120 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-16T19:44:47,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-16T19:44:47,339 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-16T19:44:47,342 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithTargetName in 524 msec 2024-12-16T19:44:47,443 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-16T19:44:47,443 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName, procId: 23 completed 2024-12-16T19:44:47,447 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-16T19:44:47,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-16T19:44:47,458 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-16T19:44:47,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-16T19:44:47,460 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=29, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-16T19:44:47,465 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-16T19:44:47,471 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408 2024-12-16T19:44:47,477 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408/recovered.edits] 2024-12-16T19:44:47,478 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5 2024-12-16T19:44:47,480 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5/recovered.edits] 2024-12-16T19:44:47,486 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408/cf/a9b0987e47454d788d31e05132688e87 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408/cf/a9b0987e47454d788d31e05132688e87 
2024-12-16T19:44:47,486 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5/cf/4d7e5f647da844df8b2ed538889921d5 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5/cf/4d7e5f647da844df8b2ed538889921d5
2024-12-16T19:44:47,490 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5/recovered.edits/9.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5/recovered.edits/9.seqid
2024-12-16T19:44:47,490 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408/recovered.edits/9.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408/recovered.edits/9.seqid
2024-12-16T19:44:47,491 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/413ac07087e3fd3ccce4c84d26ac31f5
2024-12-16T19:44:47,491 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithTargetName/775d875c2b0f65030632e2caf55f9408
2024-12-16T19:44:47,491 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions
2024-12-16T19:44:47,492 DEBUG [PEWorker-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71
2024-12-16T19:44:47,492 DEBUG [PEWorker-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf]
2024-12-16T19:44:47,500 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202412161895301c6a0f4855a16f0e01d6d4689d_413ac07087e3fd3ccce4c84d26ac31f5 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202412161895301c6a0f4855a16f0e01d6d4689d_413ac07087e3fd3ccce4c84d26ac31f5
2024-12-16T19:44:47,506 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241216c36b307cbf464286b7c03ca181382597_775d875c2b0f65030632e2caf55f9408 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e20241216c36b307cbf464286b7c03ca181382597_775d875c2b0f65030632e2caf55f9408
2024-12-16T19:44:47,507 DEBUG [PEWorker-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71
2024-12-16T19:44:47,510 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=29, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName
2024-12-16T19:44:47,515 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42741 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms
2024-12-16T19:44:47,519 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta
2024-12-16T19:44:47,525 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithTargetName' descriptor.
2024-12-16T19:44:47,527 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=29, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName
2024-12-16T19:44:47,527 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithTargetName' from region states.
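The archiver entries above move each store file from the table's location under <root>/data to the mirrored location under <root>/archive/data before the region directories themselves are deleted. The helper below is hypothetical (it is not the HBase HFileArchiver API) and only illustrates that path mapping:

import org.apache.hadoop.fs.Path;

final class ArchivePathSketch {
  // Maps e.g. <root>/data/default/testtb-testExportWithTargetName/<region>/cf/<hfile>
  //       to  <root>/archive/data/default/testtb-testExportWithTargetName/<region>/cf/<hfile>
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String dataPrefix = new Path(rootDir, "data").toString() + "/";
    String file = storeFile.toString();
    if (!file.startsWith(dataPrefix)) {
      throw new IllegalArgumentException("not under the data root: " + storeFile);
    }
    return new Path(new Path(rootDir, "archive/data"), file.substring(dataPrefix.length()));
  }
}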
2024-12-16T19:44:47,528 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378287527"}]},"ts":"9223372036854775807"}
2024-12-16T19:44:47,528 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378287527"}]},"ts":"9223372036854775807"}
2024-12-16T19:44:47,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-16T19:44:47,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-16T19:44:47,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-16T19:44:47,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-16T19:44:47,530 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF
2024-12-16T19:44:47,530 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF
2024-12-16T19:44:47,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-16T19:44:47,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-16T19:44:47,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-16T19:44:47,537 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null
2024-12-16T19:44:47,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName
2024-12-16T19:44:47,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-16T19:44:47,537 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty
2024-12-16T19:44:47,537 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META
2024-12-16T19:44:47,537 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 775d875c2b0f65030632e2caf55f9408, NAME => 'testtb-testExportWithTargetName,,1734378252295.775d875c2b0f65030632e2caf55f9408.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 413ac07087e3fd3ccce4c84d26ac31f5, NAME => 'testtb-testExportWithTargetName,1,1734378252295.413ac07087e3fd3ccce4c84d26ac31f5.', STARTKEY => '1', ENDKEY => ''}]
2024-12-16T19:44:47,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl
2024-12-16T19:44:47,538 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithTargetName' as deleted.
2024-12-16T19:44:47,538 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data null
2024-12-16T19:44:47,538 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty
2024-12-16T19:44:47,538 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734378287538"}]},"ts":"9223372036854775807"}
2024-12-16T19:44:47,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29
2024-12-16T19:44:47,549 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithTargetName state from META
2024-12-16T19:44:47,560 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=29, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName
2024-12-16T19:44:47,562 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithTargetName in 112 msec
2024-12-16T19:44:47,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29
2024-12-16T19:44:47,643 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName, procId: 29 completed
2024-12-16T19:44:47,664 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName"
2024-12-16T19:44:47,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithTargetName
2024-12-16T19:44:47,671 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName"
2024-12-16T19:44:47,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithTargetName
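With the table disabled, the remaining client-side teardown recorded above is a table delete (DeleteTableProcedure, pid=29) followed by removal of the two test snapshots. A rough sketch of those calls against the public Admin API; the connection setup is added only to keep the example self-contained, since the test manages its own mini-cluster connection:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class DeleteTableAndSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    TableName table = TableName.valueOf("default:testtb-testExportWithTargetName");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      if (admin.tableExists(table)) {
        admin.deleteTable(table); // region dirs archived, meta rows and descriptor removed
      }
      // Matches the SnapshotManager "Deleting snapshot" entries above.
      admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
      admin.deleteSnapshot("snaptb0-testExportWithTargetName");
    }
  }
}

deleteSnapshot takes the snapshot name as a plain string, matching the names quoted in the MasterRpcServices entries above.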
2024-12-16T19:44:47,704 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=777 (was 718) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: htable-pool-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: process reaper (pid 55378) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36543 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:50756 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1422999152) connection to localhost/127.0.0.1:38709 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_229706635_1 at /127.0.0.1:56112 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:35232 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) 
app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: htable-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:56132 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_229706635_1 at /127.0.0.1:50734 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1422999152) connection to localhost/127.0.0.1:37293 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37293 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'JobHistoryServer' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1338 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) 
java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33833 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1422999152) connection to localhost/127.0.0.1:36543 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=811 (was 775) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1127 (was 803) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=2666 (was 3945) 2024-12-16T19:44:47,704 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=777 is superior to 500 2024-12-16T19:44:47,725 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=777, OpenFileDescriptor=811, MaxFileDescriptor=1048576, SystemLoadAverage=1127, ProcessCount=17, AvailableMemoryMB=2665 2024-12-16T19:44:47,725 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=777 is superior to 500 2024-12-16T19:44:47,727 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T19:44:47,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-16T19:44:47,731 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T19:44:47,731 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 30 2024-12-16T19:44:47,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-16T19:44:47,732 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T19:44:47,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741904_1080 (size=440) 2024-12-16T19:44:47,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741904_1080 (size=440) 2024-12-16T19:44:47,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741904_1080 (size=440) 2024-12-16T19:44:47,750 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 2eafc8c30dc3068090fca07c947c39fa, NAME => 'testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:44:47,750 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => c9c072b54498c6df337346164c585ad9, NAME => 'testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:44:47,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741906_1082 (size=65) 2024-12-16T19:44:47,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741906_1082 (size=65) 2024-12-16T19:44:47,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741906_1082 (size=65) 2024-12-16T19:44:47,766 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:44:47,766 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing 2eafc8c30dc3068090fca07c947c39fa, disabling compactions & flushes 2024-12-16T19:44:47,766 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. 2024-12-16T19:44:47,766 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. 2024-12-16T19:44:47,766 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. after waiting 0 ms 2024-12-16T19:44:47,766 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. 2024-12-16T19:44:47,766 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. 
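
[Editor's note] The create request logged at 19:44:47,727 carries the full descriptor the test asked for: a single 'cf' family with MOB enabled (IS_MOB => 'true', MOB_THRESHOLD => '0') and a split at key '1', which yields the two regions being initialized above. The sketch below is only an illustration of what an equivalent client-side request looks like with the HBase 2.x Admin API; the connection bootstrap is assumed, and only the table name, family settings, and split key are taken from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)        // VERSIONS => '1'
          .setMobEnabled(true)      // IS_MOB => 'true'
          .setMobThreshold(0L)      // MOB_THRESHOLD => '0': effectively every value goes to MOB storage
          .build();
      TableDescriptor td = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(cf)
          .build();
      // Split key '1' produces the two regions seen in the log: ('', '1') and ('1', '').
      admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
```

With the MOB threshold at 0, effectively every cell written to 'cf' is stored as a MOB file, which is what the later MOB snapshot and export steps in this test exercise.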
2024-12-16T19:44:47,766 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 2eafc8c30dc3068090fca07c947c39fa: 2024-12-16T19:44:47,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741905_1081 (size=65) 2024-12-16T19:44:47,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741905_1081 (size=65) 2024-12-16T19:44:47,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741905_1081 (size=65) 2024-12-16T19:44:47,778 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:44:47,778 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing c9c072b54498c6df337346164c585ad9, disabling compactions & flushes 2024-12-16T19:44:47,778 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. 2024-12-16T19:44:47,778 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. 2024-12-16T19:44:47,778 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. after waiting 0 ms 2024-12-16T19:44:47,778 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. 2024-12-16T19:44:47,779 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. 
2024-12-16T19:44:47,779 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for c9c072b54498c6df337346164c585ad9: 2024-12-16T19:44:47,780 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T19:44:47,780 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734378287780"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378287780"}]},"ts":"1734378287780"} 2024-12-16T19:44:47,780 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734378287780"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378287780"}]},"ts":"1734378287780"} 2024-12-16T19:44:47,783 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T19:44:47,784 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T19:44:47,785 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378287784"}]},"ts":"1734378287784"} 2024-12-16T19:44:47,786 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-16T19:44:47,804 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {7a6e4bf70377=0} racks are {/default-rack=0} 2024-12-16T19:44:47,805 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T19:44:47,805 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T19:44:47,805 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T19:44:47,805 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T19:44:47,805 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T19:44:47,805 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T19:44:47,805 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T19:44:47,806 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2eafc8c30dc3068090fca07c947c39fa, ASSIGN}, {pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c9c072b54498c6df337346164c585ad9, ASSIGN}] 2024-12-16T19:44:47,808 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, 
region=c9c072b54498c6df337346164c585ad9, ASSIGN 2024-12-16T19:44:47,808 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2eafc8c30dc3068090fca07c947c39fa, ASSIGN 2024-12-16T19:44:47,809 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2eafc8c30dc3068090fca07c947c39fa, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42861,1734378237024; forceNewPlan=false, retain=false 2024-12-16T19:44:47,809 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c9c072b54498c6df337346164c585ad9, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42741,1734378236851; forceNewPlan=false, retain=false 2024-12-16T19:44:47,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-16T19:44:47,960 INFO [7a6e4bf70377:45883 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T19:44:47,960 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=2eafc8c30dc3068090fca07c947c39fa, regionState=OPENING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:44:47,960 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=c9c072b54498c6df337346164c585ad9, regionState=OPENING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:47,973 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=31, state=RUNNABLE; OpenRegionProcedure 2eafc8c30dc3068090fca07c947c39fa, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:44:47,978 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=32, state=RUNNABLE; OpenRegionProcedure c9c072b54498c6df337346164c585ad9, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:44:48,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-16T19:44:48,133 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:44:48,146 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:48,152 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. 
2024-12-16T19:44:48,153 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7285): Opening region: {ENCODED => 2eafc8c30dc3068090fca07c947c39fa, NAME => 'testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T19:44:48,153 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. service=AccessControlService 2024-12-16T19:44:48,153 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:44:48,154 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:44:48,154 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:44:48,154 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7327): checking encryption for 2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:44:48,154 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7330): checking classloading for 2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:44:48,159 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. 2024-12-16T19:44:48,159 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7285): Opening region: {ENCODED => c9c072b54498c6df337346164c585ad9, NAME => 'testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T19:44:48,160 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. service=AccessControlService 2024-12-16T19:44:48,161 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-16T19:44:48,161 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl c9c072b54498c6df337346164c585ad9 2024-12-16T19:44:48,161 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:44:48,161 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7327): checking encryption for c9c072b54498c6df337346164c585ad9 2024-12-16T19:44:48,161 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7330): checking classloading for c9c072b54498c6df337346164c585ad9 2024-12-16T19:44:48,158 INFO [StoreOpener-2eafc8c30dc3068090fca07c947c39fa-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:44:48,170 INFO [StoreOpener-c9c072b54498c6df337346164c585ad9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c9c072b54498c6df337346164c585ad9 2024-12-16T19:44:48,176 INFO [StoreOpener-2eafc8c30dc3068090fca07c947c39fa-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2eafc8c30dc3068090fca07c947c39fa columnFamilyName cf 2024-12-16T19:44:48,183 INFO [StoreOpener-c9c072b54498c6df337346164c585ad9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c9c072b54498c6df337346164c585ad9 columnFamilyName cf 2024-12-16T19:44:48,184 DEBUG [StoreOpener-2eafc8c30dc3068090fca07c947c39fa-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:48,190 DEBUG [StoreOpener-c9c072b54498c6df337346164c585ad9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:48,193 INFO [StoreOpener-2eafc8c30dc3068090fca07c947c39fa-1 {}] regionserver.HStore(327): Store=2eafc8c30dc3068090fca07c947c39fa/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:44:48,195 INFO [StoreOpener-c9c072b54498c6df337346164c585ad9-1 {}] regionserver.HStore(327): Store=c9c072b54498c6df337346164c585ad9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:44:48,196 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:44:48,196 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:44:48,200 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1085): writing seq id for 2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:44:48,198 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9 2024-12-16T19:44:48,206 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9 2024-12-16T19:44:48,223 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:44:48,226 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1102): Opened 2eafc8c30dc3068090fca07c947c39fa; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67374143, jitterRate=0.003952965140342712}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:44:48,227 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1001): Region open journal for 2eafc8c30dc3068090fca07c947c39fa: 2024-12-16T19:44:48,228 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 
{event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1085): writing seq id for c9c072b54498c6df337346164c585ad9 2024-12-16T19:44:48,229 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa., pid=33, masterSystemTime=1734378288133 2024-12-16T19:44:48,236 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. 2024-12-16T19:44:48,236 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. 2024-12-16T19:44:48,243 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=2eafc8c30dc3068090fca07c947c39fa, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:44:48,253 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=31 2024-12-16T19:44:48,253 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=31, state=SUCCESS; OpenRegionProcedure 2eafc8c30dc3068090fca07c947c39fa, server=7a6e4bf70377,42861,1734378237024 in 274 msec 2024-12-16T19:44:48,258 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2eafc8c30dc3068090fca07c947c39fa, ASSIGN in 447 msec 2024-12-16T19:44:48,268 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:44:48,271 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1102): Opened c9c072b54498c6df337346164c585ad9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69286546, jitterRate=0.03244999051094055}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:44:48,271 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1001): Region open journal for c9c072b54498c6df337346164c585ad9: 2024-12-16T19:44:48,274 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9., pid=34, masterSystemTime=1734378288146 2024-12-16T19:44:48,279 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=c9c072b54498c6df337346164c585ad9, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:48,285 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. 
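
[Editor's note] The TransitRegionStateProcedure/OpenRegionProcedure chains above end with both regions OPEN, one on the server at port 42861 and one at 42741. For readers following along from the client side, this assumed sketch shows one way to inspect the resulting assignment through a RegionLocator; it is not part of the test itself.

```java
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(table)) {
      // One entry per region; for this table the log shows two regions,
      // assigned to the region servers on ports 42861 and 42741.
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
```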
2024-12-16T19:44:48,286 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. 2024-12-16T19:44:48,297 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=32 2024-12-16T19:44:48,297 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=32, state=SUCCESS; OpenRegionProcedure c9c072b54498c6df337346164c585ad9, server=7a6e4bf70377,42741,1734378236851 in 308 msec 2024-12-16T19:44:48,314 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=30 2024-12-16T19:44:48,315 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c9c072b54498c6df337346164c585ad9, ASSIGN in 492 msec 2024-12-16T19:44:48,315 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T19:44:48,316 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378288315"}]},"ts":"1734378288315"} 2024-12-16T19:44:48,327 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-16T19:44:48,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-16T19:44:48,344 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T19:44:48,344 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-16T19:44:48,348 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-16T19:44:48,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:44:48,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:44:48,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:44:48,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:44:48,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T19:44:48,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T19:44:48,371 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T19:44:48,371 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T19:44:48,382 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T19:44:48,387 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T19:44:48,388 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T19:44:48,398 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithResetTtl in 661 msec 2024-12-16T19:44:48,480 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-16T19:44:48,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-16T19:44:48,847 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl, procId: 30 completed 2024-12-16T19:44:48,861 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-16T19:44:48,861 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. 2024-12-16T19:44:48,861 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:44:48,970 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-16T19:44:48,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378288971 (current time:1734378288971). 
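
[Editor's note] The request logged at 19:44:48,970 ({ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }) is an ordinary FLUSH-type snapshot taken before any data has been loaded. A minimal client-side equivalent using Admin.snapshot() is sketched below; the connection setup is assumed, and only the snapshot name, table name, and snapshot type come from the log.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Mirrors the request above: a FLUSH snapshot of the test table.
      // The call blocks until the master-side SnapshotProcedure (pid=35 here) completes.
      admin.snapshot("emptySnaptb0-testExportWithResetTtl",
          TableName.valueOf("testtb-testExportWithResetTtl"),
          SnapshotType.FLUSH);
    }
  }
}
```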
2024-12-16T19:44:48,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:44:48,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-16T19:44:48,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:44:48,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3dfda5b3 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e9b7e0a 2024-12-16T19:44:49,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63148ea4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:44:49,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:49,045 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43604, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:49,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3dfda5b3 to 127.0.0.1:62515 2024-12-16T19:44:49,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:44:49,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10bbc0f6 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@35f587c5 2024-12-16T19:44:49,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72d34e71, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:44:49,102 DEBUG [hconnection-0x6b5a8d89-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:49,104 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43618, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:49,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:49,108 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54126, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:49,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x10bbc0f6 to 127.0.0.1:62515 2024-12-16T19:44:49,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:44:49,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-16T19:44:49,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T19:44:49,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-16T19:44:49,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35 2024-12-16T19:44:49,117 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:44:49,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-16T19:44:49,118 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:44:49,126 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:44:49,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741907_1083 (size=161) 2024-12-16T19:44:49,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741907_1083 (size=161) 2024-12-16T19:44:49,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741907_1083 (size=161) 2024-12-16T19:44:49,173 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:44:49,173 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 2eafc8c30dc3068090fca07c947c39fa}, {pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure c9c072b54498c6df337346164c585ad9}] 
2024-12-16T19:44:49,176 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:44:49,176 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure c9c072b54498c6df337346164c585ad9 2024-12-16T19:44:49,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-16T19:44:49,329 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:49,330 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:44:49,331 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=36 2024-12-16T19:44:49,332 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. 2024-12-16T19:44:49,332 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.HRegion(2538): Flush status journal for 2eafc8c30dc3068090fca07c947c39fa: 2024-12-16T19:44:49,332 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-16T19:44:49,333 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-16T19:44:49,333 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:44:49,333 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T19:44:49,334 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42741 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=37 2024-12-16T19:44:49,340 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. 
2024-12-16T19:44:49,341 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.HRegion(2538): Flush status journal for c9c072b54498c6df337346164c585ad9: 2024-12-16T19:44:49,341 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-16T19:44:49,341 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-16T19:44:49,342 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:44:49,342 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T19:44:49,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741909_1085 (size=68) 2024-12-16T19:44:49,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741909_1085 (size=68) 2024-12-16T19:44:49,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741909_1085 (size=68) 2024-12-16T19:44:49,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741908_1084 (size=68) 2024-12-16T19:44:49,402 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. 
2024-12-16T19:44:49,402 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=36 2024-12-16T19:44:49,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=36 2024-12-16T19:44:49,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741908_1084 (size=68) 2024-12-16T19:44:49,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741908_1084 (size=68) 2024-12-16T19:44:49,405 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:44:49,405 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:44:49,413 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; SnapshotRegionProcedure 2eafc8c30dc3068090fca07c947c39fa in 236 msec 2024-12-16T19:44:49,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-16T19:44:49,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-16T19:44:49,788 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. 
2024-12-16T19:44:49,788 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=37 2024-12-16T19:44:49,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=37 2024-12-16T19:44:49,790 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region c9c072b54498c6df337346164c585ad9 2024-12-16T19:44:49,793 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure c9c072b54498c6df337346164c585ad9 2024-12-16T19:44:49,816 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=35 2024-12-16T19:44:49,816 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=35, state=SUCCESS; SnapshotRegionProcedure c9c072b54498c6df337346164c585ad9 in 626 msec 2024-12-16T19:44:49,816 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:44:49,822 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:44:49,834 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-16T19:44:49,834 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:44:49,834 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:49,846 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-16T19:44:49,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741910_1086 (size=60) 2024-12-16T19:44:49,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741910_1086 (size=60) 2024-12-16T19:44:49,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741910_1086 (size=60) 2024-12-16T19:44:50,006 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:44:50,006 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-16T19:44:50,008 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-16T19:44:50,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741911_1087 (size=641) 2024-12-16T19:44:50,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741911_1087 (size=641) 2024-12-16T19:44:50,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741911_1087 (size=641) 2024-12-16T19:44:50,175 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:44:50,224 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:44:50,225 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-16T19:44:50,230 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:44:50,231 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35 2024-12-16T19:44:50,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-16T19:44:50,238 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 1.1190 sec 2024-12-16T19:44:51,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-16T19:44:51,241 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl, procId: 35 completed 2024-12-16T19:44:51,249 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42861 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:44:51,260 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42741 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:44:51,269 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-16T19:44:51,269 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. 2024-12-16T19:44:51,269 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:44:51,326 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-16T19:44:51,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378291326 (current time:1734378291326). 
2024-12-16T19:44:51,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:44:51,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-16T19:44:51,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:44:51,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x54324d93 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6b3bb459 2024-12-16T19:44:51,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c970e26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:44:51,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:51,410 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43634, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:51,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x54324d93 to 127.0.0.1:62515 2024-12-16T19:44:51,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:44:51,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x258058e9 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6b08a60e 2024-12-16T19:44:51,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5807cc5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:44:51,469 DEBUG [hconnection-0x2b1682ad-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:51,474 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43640, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:51,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:51,483 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54140, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:51,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 
0x258058e9 to 127.0.0.1:62515 2024-12-16T19:44:51,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:44:51,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-16T19:44:51,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T19:44:51,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-16T19:44:51,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38 2024-12-16T19:44:51,503 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:44:51,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-16T19:44:51,510 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:44:51,516 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:44:51,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-16T19:44:51,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741912_1088 (size=156) 2024-12-16T19:44:51,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741912_1088 (size=156) 2024-12-16T19:44:51,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741912_1088 (size=156) 2024-12-16T19:44:51,728 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:44:51,729 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 
2eafc8c30dc3068090fca07c947c39fa}, {pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure c9c072b54498c6df337346164c585ad9}] 2024-12-16T19:44:51,743 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure c9c072b54498c6df337346164c585ad9 2024-12-16T19:44:51,744 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:44:51,775 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0001_000001 (auth:SIMPLE) from 127.0.0.1:51058 2024-12-16T19:44:51,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-16T19:44:51,824 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_0/usercache/jenkins/appcache/application_1734378249643_0001/container_1734378249643_0001_01_000001/launch_container.sh] 2024-12-16T19:44:51,824 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_0/usercache/jenkins/appcache/application_1734378249643_0001/container_1734378249643_0001_01_000001/container_tokens] 2024-12-16T19:44:51,824 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_0/usercache/jenkins/appcache/application_1734378249643_0001/container_1734378249643_0001_01_000001/sysfs] 2024-12-16T19:44:51,897 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:44:51,898 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=39 2024-12-16T19:44:51,898 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. 
2024-12-16T19:44:51,899 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2837): Flushing 2eafc8c30dc3068090fca07c947c39fa 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-16T19:44:51,904 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:44:51,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42741 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=40 2024-12-16T19:44:51,906 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. 2024-12-16T19:44:51,906 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2837): Flushing c9c072b54498c6df337346164c585ad9 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-16T19:44:51,951 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121609c512ec91664d96ab39d423b66597c1_2eafc8c30dc3068090fca07c947c39fa is 71, key is 03c2d8dd6570a2bdcfcd7dca3d386514/cf:q/1734378291249/Put/seqid=0 2024-12-16T19:44:52,003 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024121670deabd229864915b7b34f996eb6950c_c9c072b54498c6df337346164c585ad9 is 71, key is 10409e6d566125f208322a581dbff82a/cf:q/1734378291260/Put/seqid=0 2024-12-16T19:44:52,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741913_1089 (size=5171) 2024-12-16T19:44:52,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741913_1089 (size=5171) 2024-12-16T19:44:52,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741913_1089 (size=5171) 2024-12-16T19:44:52,040 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:52,079 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121609c512ec91664d96ab39d423b66597c1_2eafc8c30dc3068090fca07c947c39fa to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e2024121609c512ec91664d96ab39d423b66597c1_2eafc8c30dc3068090fca07c947c39fa 
2024-12-16T19:44:52,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa/.tmp/cf/bb5be642aa384501b3951a3cb616c963, store: [table=testtb-testExportWithResetTtl family=cf region=2eafc8c30dc3068090fca07c947c39fa] 2024-12-16T19:44:52,089 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa/.tmp/cf/bb5be642aa384501b3951a3cb616c963 is 206, key is 087ec72fbeb5015d62b4edff41a2d1477/cf:q/1734378291249/Put/seqid=0 2024-12-16T19:44:52,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741914_1090 (size=8101) 2024-12-16T19:44:52,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741914_1090 (size=8101) 2024-12-16T19:44:52,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741914_1090 (size=8101) 2024-12-16T19:44:52,118 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:52,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-16T19:44:52,145 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024121670deabd229864915b7b34f996eb6950c_c9c072b54498c6df337346164c585ad9 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b2024121670deabd229864915b7b34f996eb6950c_c9c072b54498c6df337346164c585ad9 2024-12-16T19:44:52,151 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9/.tmp/cf/64a0c82ba82a44f1aef440297182fa49, store: [table=testtb-testExportWithResetTtl family=cf region=c9c072b54498c6df337346164c585ad9] 2024-12-16T19:44:52,152 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9/.tmp/cf/64a0c82ba82a44f1aef440297182fa49 is 206, key is 11440ebdb6c102fb8da86eafde7a4002f/cf:q/1734378291260/Put/seqid=0 2024-12-16T19:44:52,170 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741915_1091 (size=6106) 2024-12-16T19:44:52,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741915_1091 (size=6106) 2024-12-16T19:44:52,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741915_1091 (size=6106) 2024-12-16T19:44:52,179 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa/.tmp/cf/bb5be642aa384501b3951a3cb616c963 2024-12-16T19:44:52,219 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa/.tmp/cf/bb5be642aa384501b3951a3cb616c963 as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa/cf/bb5be642aa384501b3951a3cb616c963 2024-12-16T19:44:52,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741916_1092 (size=14651) 2024-12-16T19:44:52,231 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa/cf/bb5be642aa384501b3951a3cb616c963, entries=4, sequenceid=6, filesize=6.0 K 2024-12-16T19:44:52,234 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 2eafc8c30dc3068090fca07c947c39fa in 335ms, sequenceid=6, compaction requested=false 2024-12-16T19:44:52,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2538): Flush status journal for 2eafc8c30dc3068090fca07c947c39fa: 2024-12-16T19:44:52,234 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. for snaptb0-testExportWithResetTtl completed. 2024-12-16T19:44:52,235 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-16T19:44:52,235 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:44:52,235 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa/cf/bb5be642aa384501b3951a3cb616c963] hfiles 2024-12-16T19:44:52,235 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa/cf/bb5be642aa384501b3951a3cb616c963 for snapshot=snaptb0-testExportWithResetTtl 2024-12-16T19:44:52,238 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9/.tmp/cf/64a0c82ba82a44f1aef440297182fa49 2024-12-16T19:44:52,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741916_1092 (size=14651) 2024-12-16T19:44:52,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741916_1092 (size=14651) 2024-12-16T19:44:52,258 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9/.tmp/cf/64a0c82ba82a44f1aef440297182fa49 as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9/cf/64a0c82ba82a44f1aef440297182fa49 2024-12-16T19:44:52,280 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9/cf/64a0c82ba82a44f1aef440297182fa49, entries=46, sequenceid=6, filesize=14.3 K 2024-12-16T19:44:52,288 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for c9c072b54498c6df337346164c585ad9 in 382ms, sequenceid=6, compaction requested=false 2024-12-16T19:44:52,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2538): Flush status journal for c9c072b54498c6df337346164c585ad9: 2024-12-16T19:44:52,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] 
regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. for snaptb0-testExportWithResetTtl completed. 2024-12-16T19:44:52,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-16T19:44:52,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:44:52,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9/cf/64a0c82ba82a44f1aef440297182fa49] hfiles 2024-12-16T19:44:52,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9/cf/64a0c82ba82a44f1aef440297182fa49 for snapshot=snaptb0-testExportWithResetTtl 2024-12-16T19:44:52,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741917_1093 (size=107) 2024-12-16T19:44:52,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741917_1093 (size=107) 2024-12-16T19:44:52,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741917_1093 (size=107) 2024-12-16T19:44:52,336 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. 
2024-12-16T19:44:52,336 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=39 2024-12-16T19:44:52,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=39 2024-12-16T19:44:52,337 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:44:52,338 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:44:52,355 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; SnapshotRegionProcedure 2eafc8c30dc3068090fca07c947c39fa in 619 msec 2024-12-16T19:44:52,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741918_1094 (size=107) 2024-12-16T19:44:52,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741918_1094 (size=107) 2024-12-16T19:44:52,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741918_1094 (size=107) 2024-12-16T19:44:52,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-16T19:44:52,688 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T19:44:52,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. 
2024-12-16T19:44:52,858 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=40 2024-12-16T19:44:52,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=40 2024-12-16T19:44:52,860 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region c9c072b54498c6df337346164c585ad9 2024-12-16T19:44:52,860 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure c9c072b54498c6df337346164c585ad9 2024-12-16T19:44:52,867 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=38 2024-12-16T19:44:52,867 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=38, state=SUCCESS; SnapshotRegionProcedure c9c072b54498c6df337346164c585ad9 in 1.1340 sec 2024-12-16T19:44:52,867 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:44:52,872 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:44:52,874 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-16T19:44:52,874 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:44:52,875 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:52,881 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b2024121670deabd229864915b7b34f996eb6950c_c9c072b54498c6df337346164c585ad9, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e2024121609c512ec91664d96ab39d423b66597c1_2eafc8c30dc3068090fca07c947c39fa] hfiles 2024-12-16T19:44:52,881 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b2024121670deabd229864915b7b34f996eb6950c_c9c072b54498c6df337346164c585ad9 2024-12-16T19:44:52,881 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e2024121609c512ec91664d96ab39d423b66597c1_2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:44:52,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741919_1095 (size=291) 2024-12-16T19:44:52,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741919_1095 (size=291) 2024-12-16T19:44:52,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741919_1095 (size=291) 2024-12-16T19:44:53,308 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:44:53,308 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-16T19:44:53,309 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-16T19:44:53,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741920_1096 (size=951) 2024-12-16T19:44:53,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741920_1096 (size=951) 2024-12-16T19:44:53,335 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741920_1096 (size=951) 2024-12-16T19:44:53,347 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:44:53,364 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:44:53,364 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-16T19:44:53,366 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:44:53,367 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38 2024-12-16T19:44:53,368 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 1.8730 sec 2024-12-16T19:44:53,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-16T19:44:53,626 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl, procId: 38 completed 2024-12-16T19:44:53,628 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T19:44:53,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportWithResetTtl 2024-12-16T19:44:53,632 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 
2024-12-16T19:44:53,632 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 41 2024-12-16T19:44:53,633 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T19:44:53,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-16T19:44:53,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741921_1097 (size=433) 2024-12-16T19:44:53,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741921_1097 (size=433) 2024-12-16T19:44:53,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741921_1097 (size=433) 2024-12-16T19:44:53,669 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 76b4d4e09416bc1f5ddf2d01512c9b63, NAME => 'testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:44:53,670 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 229ba0f80f49b0aa2be2d6b7010de68e, NAME => 'testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:44:53,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741923_1099 (size=58) 2024-12-16T19:44:53,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741923_1099 (size=58) 2024-12-16T19:44:53,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741923_1099 (size=58) 2024-12-16T19:44:53,686 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] 
regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:44:53,686 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing 76b4d4e09416bc1f5ddf2d01512c9b63, disabling compactions & flushes 2024-12-16T19:44:53,686 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. 2024-12-16T19:44:53,686 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. 2024-12-16T19:44:53,686 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. after waiting 0 ms 2024-12-16T19:44:53,686 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. 2024-12-16T19:44:53,686 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. 2024-12-16T19:44:53,686 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 76b4d4e09416bc1f5ddf2d01512c9b63: 2024-12-16T19:44:53,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741922_1098 (size=58) 2024-12-16T19:44:53,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741922_1098 (size=58) 2024-12-16T19:44:53,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741922_1098 (size=58) 2024-12-16T19:44:53,695 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:44:53,696 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing 229ba0f80f49b0aa2be2d6b7010de68e, disabling compactions & flushes 2024-12-16T19:44:53,696 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e. 2024-12-16T19:44:53,696 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e. 2024-12-16T19:44:53,696 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e. after waiting 0 ms 2024-12-16T19:44:53,696 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e. 
2024-12-16T19:44:53,696 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e. 2024-12-16T19:44:53,696 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for 229ba0f80f49b0aa2be2d6b7010de68e: 2024-12-16T19:44:53,700 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T19:44:53,701 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1734378293701"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378293701"}]},"ts":"1734378293701"} 2024-12-16T19:44:53,701 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1734378293701"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378293701"}]},"ts":"1734378293701"} 2024-12-16T19:44:53,704 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T19:44:53,706 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T19:44:53,706 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378293706"}]},"ts":"1734378293706"} 2024-12-16T19:44:53,713 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-16T19:44:53,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-16T19:44:53,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-16T19:44:54,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-16T19:44:54,254 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-16T19:44:54,306 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {7a6e4bf70377=0} racks are {/default-rack=0} 2024-12-16T19:44:54,315 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T19:44:54,315 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T19:44:54,315 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T19:44:54,315 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T19:44:54,315 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T19:44:54,315 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T19:44:54,315 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T19:44:54,316 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=76b4d4e09416bc1f5ddf2d01512c9b63, ASSIGN}, {pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=229ba0f80f49b0aa2be2d6b7010de68e, ASSIGN}] 2024-12-16T19:44:54,322 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=229ba0f80f49b0aa2be2d6b7010de68e, ASSIGN 2024-12-16T19:44:54,322 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=76b4d4e09416bc1f5ddf2d01512c9b63, ASSIGN 2024-12-16T19:44:54,326 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=229ba0f80f49b0aa2be2d6b7010de68e, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42861,1734378237024; forceNewPlan=false, retain=false 2024-12-16T19:44:54,326 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=76b4d4e09416bc1f5ddf2d01512c9b63, ASSIGN; state=OFFLINE, location=7a6e4bf70377,45217,1734378236605; forceNewPlan=false, retain=false 2024-12-16T19:44:54,477 INFO [7a6e4bf70377:45883 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-16T19:44:54,477 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=76b4d4e09416bc1f5ddf2d01512c9b63, regionState=OPENING, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:44:54,477 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=229ba0f80f49b0aa2be2d6b7010de68e, regionState=OPENING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:44:54,490 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=42, state=RUNNABLE; OpenRegionProcedure 76b4d4e09416bc1f5ddf2d01512c9b63, server=7a6e4bf70377,45217,1734378236605}] 2024-12-16T19:44:54,498 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=43, state=RUNNABLE; OpenRegionProcedure 229ba0f80f49b0aa2be2d6b7010de68e, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:44:54,653 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:44:54,668 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:44:54,686 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e. 2024-12-16T19:44:54,686 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7285): Opening region: {ENCODED => 229ba0f80f49b0aa2be2d6b7010de68e, NAME => 'testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T19:44:54,686 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e. service=AccessControlService 2024-12-16T19:44:54,687 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:44:54,687 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 229ba0f80f49b0aa2be2d6b7010de68e 2024-12-16T19:44:54,687 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:44:54,687 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7327): checking encryption for 229ba0f80f49b0aa2be2d6b7010de68e 2024-12-16T19:44:54,687 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7330): checking classloading for 229ba0f80f49b0aa2be2d6b7010de68e 2024-12-16T19:44:54,698 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. 
2024-12-16T19:44:54,699 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => 76b4d4e09416bc1f5ddf2d01512c9b63, NAME => 'testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T19:44:54,699 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. service=AccessControlService 2024-12-16T19:44:54,699 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:44:54,700 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 76b4d4e09416bc1f5ddf2d01512c9b63 2024-12-16T19:44:54,700 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:44:54,700 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for 76b4d4e09416bc1f5ddf2d01512c9b63 2024-12-16T19:44:54,700 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for 76b4d4e09416bc1f5ddf2d01512c9b63 2024-12-16T19:44:54,702 INFO [StoreOpener-229ba0f80f49b0aa2be2d6b7010de68e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 229ba0f80f49b0aa2be2d6b7010de68e 2024-12-16T19:44:54,705 INFO [StoreOpener-229ba0f80f49b0aa2be2d6b7010de68e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 229ba0f80f49b0aa2be2d6b7010de68e columnFamilyName cf 2024-12-16T19:44:54,706 INFO [StoreOpener-76b4d4e09416bc1f5ddf2d01512c9b63-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 76b4d4e09416bc1f5ddf2d01512c9b63 2024-12-16T19:44:54,712 INFO [StoreOpener-76b4d4e09416bc1f5ddf2d01512c9b63-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 76b4d4e09416bc1f5ddf2d01512c9b63 columnFamilyName cf 2024-12-16T19:44:54,713 DEBUG [StoreOpener-76b4d4e09416bc1f5ddf2d01512c9b63-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:54,714 INFO [StoreOpener-76b4d4e09416bc1f5ddf2d01512c9b63-1 {}] regionserver.HStore(327): Store=76b4d4e09416bc1f5ddf2d01512c9b63/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:44:54,715 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63 2024-12-16T19:44:54,717 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63 2024-12-16T19:44:54,717 DEBUG [StoreOpener-229ba0f80f49b0aa2be2d6b7010de68e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:54,721 INFO [StoreOpener-229ba0f80f49b0aa2be2d6b7010de68e-1 {}] regionserver.HStore(327): Store=229ba0f80f49b0aa2be2d6b7010de68e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:44:54,723 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e 2024-12-16T19:44:54,724 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e 2024-12-16T19:44:54,726 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for 76b4d4e09416bc1f5ddf2d01512c9b63 2024-12-16T19:44:54,728 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1085): writing seq id for 229ba0f80f49b0aa2be2d6b7010de68e 2024-12-16T19:44:54,732 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] 
wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:44:54,733 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1102): Opened 229ba0f80f49b0aa2be2d6b7010de68e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59914469, jitterRate=-0.10720483958721161}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:44:54,733 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1001): Region open journal for 229ba0f80f49b0aa2be2d6b7010de68e: 2024-12-16T19:44:54,734 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:44:54,739 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e., pid=45, masterSystemTime=1734378294668 2024-12-16T19:44:54,742 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened 76b4d4e09416bc1f5ddf2d01512c9b63; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67358704, jitterRate=0.0037229061126708984}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:44:54,742 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for 76b4d4e09416bc1f5ddf2d01512c9b63: 2024-12-16T19:44:54,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-16T19:44:54,746 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63., pid=44, masterSystemTime=1734378294653 2024-12-16T19:44:54,747 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e. 2024-12-16T19:44:54,747 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e. 2024-12-16T19:44:54,749 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. 
2024-12-16T19:44:54,750 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. 2024-12-16T19:44:54,753 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=76b4d4e09416bc1f5ddf2d01512c9b63, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:44:54,754 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=229ba0f80f49b0aa2be2d6b7010de68e, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:44:54,770 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=42 2024-12-16T19:44:54,771 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=43 2024-12-16T19:44:54,771 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=43, state=SUCCESS; OpenRegionProcedure 229ba0f80f49b0aa2be2d6b7010de68e, server=7a6e4bf70377,42861,1734378237024 in 261 msec 2024-12-16T19:44:54,773 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=76b4d4e09416bc1f5ddf2d01512c9b63, ASSIGN in 454 msec 2024-12-16T19:44:54,773 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=42, state=SUCCESS; OpenRegionProcedure 76b4d4e09416bc1f5ddf2d01512c9b63, server=7a6e4bf70377,45217,1734378236605 in 267 msec 2024-12-16T19:44:54,778 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T19:44:54,778 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378294778"}]},"ts":"1734378294778"} 2024-12-16T19:44:54,779 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=41 2024-12-16T19:44:54,779 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=229ba0f80f49b0aa2be2d6b7010de68e, ASSIGN in 455 msec 2024-12-16T19:44:54,781 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-16T19:44:55,170 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T19:44:55,170 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-16T19:44:55,188 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-16T19:44:55,334 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:44:55,334 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:44:55,335 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:44:55,336 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:44:55,660 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T19:44:55,660 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T19:44:55,660 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T19:44:55,660 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T19:44:55,660 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T19:44:55,660 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T19:44:55,668 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; CreateTableProcedure table=testExportWithResetTtl in 2.0350 sec 2024-12-16T19:44:55,674 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T19:44:55,675 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T19:44:55,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-16T19:44:55,747 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportWithResetTtl, procId: 41 completed 2024-12-16T19:44:55,767 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportWithResetTtl 2024-12-16T19:44:55,767 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. 2024-12-16T19:44:55,767 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:44:55,805 DEBUG [hconnection-0x304e9acc-shared-pool-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:55,825 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35768, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:55,888 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:44:55,913 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42861 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:44:55,920 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportWithResetTtl 2024-12-16T19:44:55,920 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. 2024-12-16T19:44:55,920 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:44:55,954 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-16T19:44:55,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378295954 (current time:1734378295954). 
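At this point the table has been created and populated (with the WAL disabled for the test puts), and the master has just received the snapshot request { ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }. As a hedged sketch only (not the test's code; the ttl=100000 is carried in snapshot properties in the real request and is omitted here), taking an equivalent FLUSH-type snapshot from a client looks roughly like this:

```java
// Hedged sketch: requesting a FLUSH-type snapshot like the one logged above.
// The ttl seen in the snapshot description is set through snapshot properties in the
// actual test; this minimal version leaves it out.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // The master runs this as a SnapshotProcedure: it flushes the table's memstores
      // and writes the snapshot manifest, which is what the SNAPSHOT_* states in the
      // following entries carry out.
      admin.snapshot("snaptb-testExportWithResetTtl",
          TableName.valueOf("testExportWithResetTtl"));
    }
  }
}
```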
2024-12-16T19:44:55,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-16T19:44:55,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:44:55,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1657cf22 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@590c1a95 2024-12-16T19:44:55,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@110222af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:44:55,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:56,000 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55962, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:56,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1657cf22 to 127.0.0.1:62515 2024-12-16T19:44:56,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:44:56,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0984adae to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3065c6b9 2024-12-16T19:44:56,039 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-16T19:44:56,039 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-16T19:44:56,040 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-16T19:44:56,040 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-16T19:44:56,041 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-16T19:44:56,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30728d40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=null 2024-12-16T19:44:56,078 DEBUG [hconnection-0x3008a6d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:56,079 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55972, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:56,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:44:56,084 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35782, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:44:56,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0984adae to 127.0.0.1:62515 2024-12-16T19:44:56,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:44:56,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-16T19:44:56,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T19:44:56,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=46, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-16T19:44:56,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46 2024-12-16T19:44:56,092 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:44:56,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-16T19:44:56,094 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:44:56,099 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:44:56,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-16T19:44:56,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:40033 is added to blk_1073741924_1100 (size=143) 2024-12-16T19:44:56,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741924_1100 (size=143) 2024-12-16T19:44:56,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741924_1100 (size=143) 2024-12-16T19:44:56,210 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:44:56,211 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 76b4d4e09416bc1f5ddf2d01512c9b63}, {pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 229ba0f80f49b0aa2be2d6b7010de68e}] 2024-12-16T19:44:56,214 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 229ba0f80f49b0aa2be2d6b7010de68e 2024-12-16T19:44:56,215 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 76b4d4e09416bc1f5ddf2d01512c9b63 2024-12-16T19:44:56,371 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:44:56,371 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:44:56,372 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=48 2024-12-16T19:44:56,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e. 2024-12-16T19:44:56,373 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 229ba0f80f49b0aa2be2d6b7010de68e 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-16T19:44:56,373 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45217 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=47 2024-12-16T19:44:56,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. 
2024-12-16T19:44:56,383 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2837): Flushing 76b4d4e09416bc1f5ddf2d01512c9b63 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-16T19:44:56,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-16T19:44:56,420 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241216ebb93fb74f334a708ec2d4c61f5bdf4c_229ba0f80f49b0aa2be2d6b7010de68e is 71, key is 12ccf0b043e8d11e832a848c85299938/cf:q/1734378295913/Put/seqid=0 2024-12-16T19:44:56,459 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121638be028fc56741088a0b08bd048d7f37_76b4d4e09416bc1f5ddf2d01512c9b63 is 71, key is 0695add4f3f4832145418b8f6431d26a/cf:q/1734378295888/Put/seqid=0 2024-12-16T19:44:56,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741925_1101 (size=8031) 2024-12-16T19:44:56,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741925_1101 (size=8031) 2024-12-16T19:44:56,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741925_1101 (size=8031) 2024-12-16T19:44:56,535 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:56,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741926_1102 (size=5242) 2024-12-16T19:44:56,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741926_1102 (size=5242) 2024-12-16T19:44:56,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741926_1102 (size=5242) 2024-12-16T19:44:56,575 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:56,598 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241216ebb93fb74f334a708ec2d4c61f5bdf4c_229ba0f80f49b0aa2be2d6b7010de68e to 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241216ebb93fb74f334a708ec2d4c61f5bdf4c_229ba0f80f49b0aa2be2d6b7010de68e 2024-12-16T19:44:56,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e/.tmp/cf/49567492d33a43dba1a4119a71616352, store: [table=testExportWithResetTtl family=cf region=229ba0f80f49b0aa2be2d6b7010de68e] 2024-12-16T19:44:56,606 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e/.tmp/cf/49567492d33a43dba1a4119a71616352 is 199, key is 12feec01bac6749fdae72df19cb2d8ae4/cf:q/1734378295913/Put/seqid=0 2024-12-16T19:44:56,631 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121638be028fc56741088a0b08bd048d7f37_76b4d4e09416bc1f5ddf2d01512c9b63 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e2024121638be028fc56741088a0b08bd048d7f37_76b4d4e09416bc1f5ddf2d01512c9b63 2024-12-16T19:44:56,636 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63/.tmp/cf/e83491e899ba4ed6976403da5ada7f8d, store: [table=testExportWithResetTtl family=cf region=76b4d4e09416bc1f5ddf2d01512c9b63] 2024-12-16T19:44:56,637 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63/.tmp/cf/e83491e899ba4ed6976403da5ada7f8d is 199, key is 0e7004a00653a1079edf6d72d5f593925/cf:q/1734378295888/Put/seqid=0 2024-12-16T19:44:56,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741927_1103 (size=14127) 2024-12-16T19:44:56,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741927_1103 (size=14127) 2024-12-16T19:44:56,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741927_1103 (size=14127) 2024-12-16T19:44:56,702 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=2.9 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e/.tmp/cf/49567492d33a43dba1a4119a71616352 2024-12-16T19:44:56,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-16T19:44:56,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741928_1104 (size=6268) 2024-12-16T19:44:56,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741928_1104 (size=6268) 2024-12-16T19:44:56,748 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e/.tmp/cf/49567492d33a43dba1a4119a71616352 as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e/cf/49567492d33a43dba1a4119a71616352 2024-12-16T19:44:56,754 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63/.tmp/cf/e83491e899ba4ed6976403da5ada7f8d 2024-12-16T19:44:56,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741928_1104 (size=6268) 2024-12-16T19:44:56,770 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e/cf/49567492d33a43dba1a4119a71616352, entries=45, sequenceid=5, filesize=13.8 K 2024-12-16T19:44:56,784 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 229ba0f80f49b0aa2be2d6b7010de68e in 412ms, sequenceid=5, compaction requested=false 2024-12-16T19:44:56,784 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-16T19:44:56,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 229ba0f80f49b0aa2be2d6b7010de68e: 2024-12-16T19:44:56,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e. for snaptb-testExportWithResetTtl completed. 
2024-12-16T19:44:56,788 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-16T19:44:56,788 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:44:56,788 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e/cf/49567492d33a43dba1a4119a71616352] hfiles 2024-12-16T19:44:56,788 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e/cf/49567492d33a43dba1a4119a71616352 for snapshot=snaptb-testExportWithResetTtl 2024-12-16T19:44:56,795 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63/.tmp/cf/e83491e899ba4ed6976403da5ada7f8d as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63/cf/e83491e899ba4ed6976403da5ada7f8d 2024-12-16T19:44:56,816 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63/cf/e83491e899ba4ed6976403da5ada7f8d, entries=5, sequenceid=5, filesize=6.1 K 2024-12-16T19:44:56,818 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(3040): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 76b4d4e09416bc1f5ddf2d01512c9b63 in 436ms, sequenceid=5, compaction requested=false 2024-12-16T19:44:56,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2538): Flush status journal for 76b4d4e09416bc1f5ddf2d01512c9b63: 2024-12-16T19:44:56,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. for snaptb-testExportWithResetTtl completed. 2024-12-16T19:44:56,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-16T19:44:56,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:44:56,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63/cf/e83491e899ba4ed6976403da5ada7f8d] hfiles 2024-12-16T19:44:56,819 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63/cf/e83491e899ba4ed6976403da5ada7f8d for snapshot=snaptb-testExportWithResetTtl 2024-12-16T19:44:56,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741929_1105 (size=100) 2024-12-16T19:44:56,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741929_1105 (size=100) 2024-12-16T19:44:56,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741929_1105 (size=100) 2024-12-16T19:44:56,846 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e. 
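The SnapshotRegionProcedures for both regions are wrapping up here; once the parent SnapshotProcedure (pid=46) in the entries that follow reaches SNAPSHOT_COMPLETE_SNAPSHOT, the test hands the finished snapshot to org.apache.hadoop.hbase.snapshot.ExportSnapshot (its inputFs/outputRoot configuration is logged further below). A minimal, hedged sketch of driving that tool programmatically, using a placeholder destination URI rather than the test's export-test/export-<timestamp> directory:

```java
// Hedged sketch: invoking the ExportSnapshot tool whose configuration is logged further
// below. The destination URI is a placeholder, not the path used by this test run.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ExportSnapshot copies the snapshot manifest and referenced hfiles (here including
    // the MOB files) to the target filesystem, normally as a MapReduce job.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb-testExportWithResetTtl",
        "-copy-to", "hdfs://namenode:8020/export-test/snaptb-testExportWithResetTtl"
    });
    System.exit(rc);
  }
}
```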
2024-12-16T19:44:56,846 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-16T19:44:56,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-16T19:44:56,847 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 229ba0f80f49b0aa2be2d6b7010de68e 2024-12-16T19:44:56,847 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 229ba0f80f49b0aa2be2d6b7010de68e 2024-12-16T19:44:56,851 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=46, state=SUCCESS; SnapshotRegionProcedure 229ba0f80f49b0aa2be2d6b7010de68e in 638 msec 2024-12-16T19:44:56,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741930_1106 (size=100) 2024-12-16T19:44:56,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741930_1106 (size=100) 2024-12-16T19:44:56,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741930_1106 (size=100) 2024-12-16T19:44:56,859 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. 2024-12-16T19:44:56,859 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=47 2024-12-16T19:44:56,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=47 2024-12-16T19:44:56,861 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 76b4d4e09416bc1f5ddf2d01512c9b63 2024-12-16T19:44:56,861 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 76b4d4e09416bc1f5ddf2d01512c9b63 2024-12-16T19:44:56,875 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=47, resume processing ppid=46 2024-12-16T19:44:56,875 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; SnapshotRegionProcedure 76b4d4e09416bc1f5ddf2d01512c9b63 in 655 msec 2024-12-16T19:44:56,875 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:44:56,879 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute 
state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:44:56,887 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-16T19:44:56,887 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:44:56,887 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:44:56,890 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241216ebb93fb74f334a708ec2d4c61f5bdf4c_229ba0f80f49b0aa2be2d6b7010de68e, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e2024121638be028fc56741088a0b08bd048d7f37_76b4d4e09416bc1f5ddf2d01512c9b63] hfiles 2024-12-16T19:44:56,890 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241216ebb93fb74f334a708ec2d4c61f5bdf4c_229ba0f80f49b0aa2be2d6b7010de68e 2024-12-16T19:44:56,890 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e2024121638be028fc56741088a0b08bd048d7f37_76b4d4e09416bc1f5ddf2d01512c9b63 2024-12-16T19:44:56,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741931_1107 (size=284) 2024-12-16T19:44:56,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741931_1107 (size=284) 2024-12-16T19:44:56,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741931_1107 (size=284) 2024-12-16T19:44:56,914 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:44:56,914 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-16T19:44:56,915 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-16T19:44:56,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741932_1108 (size=923) 2024-12-16T19:44:56,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39241 is added to blk_1073741932_1108 (size=923) 2024-12-16T19:44:56,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741932_1108 (size=923) 2024-12-16T19:44:56,946 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:44:56,964 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:44:56,965 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-16T19:44:56,968 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:44:56,968 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46 2024-12-16T19:44:56,970 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 880 msec 2024-12-16T19:44:57,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-16T19:44:57,217 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl, procId: 46 completed 2024-12-16T19:44:57,233 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378297233 2024-12-16T19:44:57,233 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:38709, tgtDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378297233, rawTgtDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378297233, srcFsUri=hdfs://localhost:38709, srcDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:44:57,287 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:38709, inputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:44:57,287 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): 
outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378297233, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378297233/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-16T19:44:57,290 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T19:44:57,314 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378297233/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-16T19:44:57,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741933_1109 (size=143) 2024-12-16T19:44:57,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741933_1109 (size=143) 2024-12-16T19:44:57,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741933_1109 (size=143) 2024-12-16T19:44:57,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741934_1110 (size=923) 2024-12-16T19:44:57,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741934_1110 (size=923) 2024-12-16T19:44:57,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741934_1110 (size=923) 2024-12-16T19:44:57,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741935_1111 (size=141) 2024-12-16T19:44:57,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741935_1111 (size=141) 2024-12-16T19:44:57,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741935_1111 (size=141) 2024-12-16T19:44:57,868 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:57,868 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:57,869 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:57,869 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, 
using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:59,073 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-15265971407190526439.jar 2024-12-16T19:44:59,074 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:59,074 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:59,155 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-9564718143832326162.jar 2024-12-16T19:44:59,156 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:59,158 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:59,158 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:59,159 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:59,160 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:59,160 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T19:44:59,161 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T19:44:59,161 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T19:44:59,161 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T19:44:59,163 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T19:44:59,163 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-16T19:44:59,163 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T19:44:59,164 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T19:44:59,164 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T19:44:59,165 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T19:44:59,165 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T19:44:59,165 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T19:44:59,168 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T19:44:59,169 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:44:59,170 DEBUG [Time-limited test 
{}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:44:59,171 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:44:59,173 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:44:59,173 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:44:59,174 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:44:59,175 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:44:59,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741936_1112 (size=6350922) 2024-12-16T19:44:59,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741936_1112 (size=6350922) 2024-12-16T19:44:59,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741936_1112 (size=6350922) 2024-12-16T19:44:59,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741937_1113 (size=127628) 2024-12-16T19:44:59,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741937_1113 (size=127628) 2024-12-16T19:44:59,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741937_1113 (size=127628) 2024-12-16T19:44:59,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741938_1114 (size=2172137) 2024-12-16T19:44:59,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741938_1114 (size=2172137) 2024-12-16T19:44:59,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741938_1114 (size=2172137) 2024-12-16T19:44:59,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is 
added to blk_1073741939_1115 (size=213228) 2024-12-16T19:44:59,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741939_1115 (size=213228) 2024-12-16T19:44:59,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741939_1115 (size=213228) 2024-12-16T19:44:59,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741940_1116 (size=1877034) 2024-12-16T19:44:59,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741940_1116 (size=1877034) 2024-12-16T19:44:59,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741940_1116 (size=1877034) 2024-12-16T19:44:59,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741941_1117 (size=533455) 2024-12-16T19:44:59,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741941_1117 (size=533455) 2024-12-16T19:44:59,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741941_1117 (size=533455) 2024-12-16T19:45:00,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741942_1118 (size=7280644) 2024-12-16T19:45:00,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741942_1118 (size=7280644) 2024-12-16T19:45:00,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741942_1118 (size=7280644) 2024-12-16T19:45:00,190 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T19:45:00,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741943_1119 (size=451756) 2024-12-16T19:45:00,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741943_1119 (size=451756) 2024-12-16T19:45:00,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741943_1119 (size=451756) 2024-12-16T19:45:00,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741944_1120 (size=4188619) 2024-12-16T19:45:00,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741944_1120 (size=4188619) 2024-12-16T19:45:00,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741944_1120 (size=4188619) 2024-12-16T19:45:00,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741945_1121 (size=20406) 2024-12-16T19:45:00,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39241 is added to blk_1073741945_1121 (size=20406) 2024-12-16T19:45:00,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741945_1121 (size=20406) 2024-12-16T19:45:00,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741946_1122 (size=75495) 2024-12-16T19:45:00,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741946_1122 (size=75495) 2024-12-16T19:45:00,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741946_1122 (size=75495) 2024-12-16T19:45:00,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741947_1123 (size=45609) 2024-12-16T19:45:00,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741947_1123 (size=45609) 2024-12-16T19:45:00,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741947_1123 (size=45609) 2024-12-16T19:45:00,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741948_1124 (size=110084) 2024-12-16T19:45:00,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741948_1124 (size=110084) 2024-12-16T19:45:00,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741948_1124 (size=110084) 2024-12-16T19:45:00,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741949_1125 (size=1323991) 2024-12-16T19:45:00,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741949_1125 (size=1323991) 2024-12-16T19:45:00,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741949_1125 (size=1323991) 2024-12-16T19:45:00,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741950_1126 (size=23076) 2024-12-16T19:45:00,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741950_1126 (size=23076) 2024-12-16T19:45:00,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741950_1126 (size=23076) 2024-12-16T19:45:00,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741951_1127 (size=126803) 2024-12-16T19:45:00,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741951_1127 (size=126803) 2024-12-16T19:45:00,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741951_1127 (size=126803) 2024-12-16T19:45:00,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43893 is added to blk_1073741952_1128 (size=322274) 2024-12-16T19:45:01,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741952_1128 (size=322274) 2024-12-16T19:45:01,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741952_1128 (size=322274) 2024-12-16T19:45:01,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741953_1129 (size=1832290) 2024-12-16T19:45:01,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741953_1129 (size=1832290) 2024-12-16T19:45:01,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741953_1129 (size=1832290) 2024-12-16T19:45:01,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741954_1130 (size=30081) 2024-12-16T19:45:01,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741954_1130 (size=30081) 2024-12-16T19:45:01,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741954_1130 (size=30081) 2024-12-16T19:45:01,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741955_1131 (size=53616) 2024-12-16T19:45:01,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741955_1131 (size=53616) 2024-12-16T19:45:01,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741955_1131 (size=53616) 2024-12-16T19:45:01,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741956_1132 (size=29229) 2024-12-16T19:45:01,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741956_1132 (size=29229) 2024-12-16T19:45:01,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741956_1132 (size=29229) 2024-12-16T19:45:01,920 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 3893a032022fb38e1a88fc4547193141 changed from -1.0 to 0.0, refreshing cache 2024-12-16T19:45:01,925 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region c9c072b54498c6df337346164c585ad9 changed from -1.0 to 0.0, refreshing cache 2024-12-16T19:45:01,925 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 229ba0f80f49b0aa2be2d6b7010de68e changed from -1.0 to 0.0, refreshing cache 2024-12-16T19:45:01,925 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 2eafc8c30dc3068090fca07c947c39fa changed from -1.0 to 0.0, refreshing cache 2024-12-16T19:45:01,925 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 3e93dad405937162271b1d330fba4cbb changed from -1.0 to 0.0, refreshing cache 
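[Editor's note] Interleaved with the DataNode block reports above, TableMapReduceUtil is staging the HBase, Hadoop, ZooKeeper and shaded third-party jars into the distributed cache for the ExportSnapshot MapReduce job prepared at 19:44:57. For orientation, a minimal sketch of how an export like this is typically launched, assuming ExportSnapshot is driven as a Hadoop Tool the same way the hbase CLI runs it; only the snapshot name is taken from this log, while the namenode URI, target path and mapper count below are placeholders:

    // Hypothetical driver, not part of TestExportSnapshot.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Equivalent to:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //       -snapshot snaptb-testExportWithResetTtl \
        //       -copy-to hdfs://namenode:8020/backup -mappers 1
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb-testExportWithResetTtl",
            "-copy-to", "hdfs://namenode:8020/backup",  // placeholder destination
            "-mappers", "1" });
        System.exit(rc);
      }
    }

The -copy-to target plays the role of the outputRoot/initialOutputSnapshotDir logged at 19:44:57: the .snapshotinfo and data.manifest are copied into a .tmp directory under it first, and the referenced hfiles are then copied by the MapReduce job whose jar staging is logged here.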
2024-12-16T19:45:01,925 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 76b4d4e09416bc1f5ddf2d01512c9b63 changed from -1.0 to 0.0, refreshing cache 2024-12-16T19:45:01,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741957_1133 (size=169089) 2024-12-16T19:45:01,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741957_1133 (size=169089) 2024-12-16T19:45:01,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741957_1133 (size=169089) 2024-12-16T19:45:02,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741958_1134 (size=5175431) 2024-12-16T19:45:02,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741958_1134 (size=5175431) 2024-12-16T19:45:02,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741958_1134 (size=5175431) 2024-12-16T19:45:02,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741959_1135 (size=136454) 2024-12-16T19:45:02,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741959_1135 (size=136454) 2024-12-16T19:45:02,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741959_1135 (size=136454) 2024-12-16T19:45:02,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741960_1136 (size=907467) 2024-12-16T19:45:02,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741960_1136 (size=907467) 2024-12-16T19:45:02,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741960_1136 (size=907467) 2024-12-16T19:45:02,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741961_1137 (size=3317408) 2024-12-16T19:45:02,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741961_1137 (size=3317408) 2024-12-16T19:45:02,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741961_1137 (size=3317408) 2024-12-16T19:45:03,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741962_1138 (size=503880) 2024-12-16T19:45:03,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741962_1138 (size=503880) 2024-12-16T19:45:03,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741962_1138 (size=503880) 2024-12-16T19:45:03,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to 
blk_1073741963_1139 (size=4695811) 2024-12-16T19:45:03,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741963_1139 (size=4695811) 2024-12-16T19:45:03,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741963_1139 (size=4695811) 2024-12-16T19:45:03,122 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-16T19:45:03,148 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-16T19:45:03,167 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=32.9 K 2024-12-16T19:45:03,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741964_1140 (size=686) 2024-12-16T19:45:03,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741964_1140 (size=686) 2024-12-16T19:45:03,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741964_1140 (size=686) 2024-12-16T19:45:03,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741965_1141 (size=15) 2024-12-16T19:45:03,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741965_1141 (size=15) 2024-12-16T19:45:03,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741965_1141 (size=15) 2024-12-16T19:45:03,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741966_1142 (size=304875) 2024-12-16T19:45:03,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741966_1142 (size=304875) 2024-12-16T19:45:03,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741966_1142 (size=304875) 2024-12-16T19:45:04,086 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T19:45:04,086 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-16T19:45:04,369 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0002_000001 (auth:SIMPLE) from 127.0.0.1:38606 2024-12-16T19:45:13,488 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0002_000001 (auth:SIMPLE) from 127.0.0.1:42782 2024-12-16T19:45:13,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741967_1143 (size=350549) 2024-12-16T19:45:13,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741967_1143 (size=350549) 2024-12-16T19:45:13,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741967_1143 (size=350549) 2024-12-16T19:45:15,817 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0002_000001 (auth:SIMPLE) from 127.0.0.1:39296 2024-12-16T19:45:22,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741968_1144 (size=14127) 2024-12-16T19:45:22,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741968_1144 (size=14127) 2024-12-16T19:45:22,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741968_1144 (size=14127) 2024-12-16T19:45:22,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741969_1145 (size=8031) 2024-12-16T19:45:22,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741969_1145 (size=8031) 2024-12-16T19:45:22,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741969_1145 (size=8031) 2024-12-16T19:45:23,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741970_1146 (size=6268) 2024-12-16T19:45:23,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741970_1146 (size=6268) 2024-12-16T19:45:23,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741970_1146 (size=6268) 2024-12-16T19:45:23,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741971_1147 (size=5242) 2024-12-16T19:45:23,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741971_1147 (size=5242) 2024-12-16T19:45:23,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741971_1147 (size=5242) 2024-12-16T19:45:23,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741972_1148 (size=17462) 2024-12-16T19:45:23,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39241 is added to blk_1073741972_1148 (size=17462) 2024-12-16T19:45:23,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741972_1148 (size=17462) 2024-12-16T19:45:23,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741973_1149 (size=461) 2024-12-16T19:45:23,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741973_1149 (size=461) 2024-12-16T19:45:23,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741973_1149 (size=461) 2024-12-16T19:45:23,903 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_2/usercache/jenkins/appcache/application_1734378249643_0002/container_1734378249643_0002_01_000002/launch_container.sh] 2024-12-16T19:45:23,903 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_2/usercache/jenkins/appcache/application_1734378249643_0002/container_1734378249643_0002_01_000002/container_tokens] 2024-12-16T19:45:23,903 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_2/usercache/jenkins/appcache/application_1734378249643_0002/container_1734378249643_0002_01_000002/sysfs] 2024-12-16T19:45:23,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741974_1150 (size=17462) 2024-12-16T19:45:23,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741974_1150 (size=17462) 2024-12-16T19:45:23,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741974_1150 (size=17462) 2024-12-16T19:45:24,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741975_1151 (size=350549) 2024-12-16T19:45:24,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741975_1151 (size=350549) 2024-12-16T19:45:24,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741975_1151 (size=350549) 2024-12-16T19:45:24,255 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 
or 3.3.4, See HBASE-27595 for details. 2024-12-16T19:45:24,508 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0002_000001 (auth:SIMPLE) from 127.0.0.1:39306 2024-12-16T19:45:26,420 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-16T19:45:26,421 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-16T19:45:26,436 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb-testExportWithResetTtl 2024-12-16T19:45:26,437 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-16T19:45:26,437 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-16T19:45:26,437 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-16T19:45:26,438 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-16T19:45:26,438 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-16T19:45:26,438 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378297233/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378297233/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-16T19:45:26,438 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378297233/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-16T19:45:26,438 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378297233/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-16T19:45:26,454 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testExportWithResetTtl 2024-12-16T19:45:26,454 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-16T19:45:26,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testExportWithResetTtl 2024-12-16T19:45:26,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-16T19:45:26,457 DEBUG [PEWorker-5 {}] 
hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378326457"}]},"ts":"1734378326457"} 2024-12-16T19:45:26,459 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-16T19:45:26,512 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-12-16T19:45:26,513 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-16T19:45:26,515 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=76b4d4e09416bc1f5ddf2d01512c9b63, UNASSIGN}, {pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=229ba0f80f49b0aa2be2d6b7010de68e, UNASSIGN}] 2024-12-16T19:45:26,517 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=229ba0f80f49b0aa2be2d6b7010de68e, UNASSIGN 2024-12-16T19:45:26,517 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=76b4d4e09416bc1f5ddf2d01512c9b63, UNASSIGN 2024-12-16T19:45:26,518 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=229ba0f80f49b0aa2be2d6b7010de68e, regionState=CLOSING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:45:26,520 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=76b4d4e09416bc1f5ddf2d01512c9b63, regionState=CLOSING, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:45:26,521 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:45:26,521 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=53, ppid=52, state=RUNNABLE; CloseRegionProcedure 229ba0f80f49b0aa2be2d6b7010de68e, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:45:26,521 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45883 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=CLOSING, location=7a6e4bf70377,45217,1734378236605, table=testExportWithResetTtl, region=76b4d4e09416bc1f5ddf2d01512c9b63. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-16T19:45:26,523 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:45:26,523 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=51, state=RUNNABLE; CloseRegionProcedure 76b4d4e09416bc1f5ddf2d01512c9b63, server=7a6e4bf70377,45217,1734378236605}] 2024-12-16T19:45:26,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-16T19:45:26,675 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:45:26,676 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(124): Close 229ba0f80f49b0aa2be2d6b7010de68e 2024-12-16T19:45:26,676 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:45:26,676 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1681): Closing 229ba0f80f49b0aa2be2d6b7010de68e, disabling compactions & flushes 2024-12-16T19:45:26,676 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e. 2024-12-16T19:45:26,676 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e. 2024-12-16T19:45:26,676 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e. after waiting 0 ms 2024-12-16T19:45:26,676 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e. 2024-12-16T19:45:26,677 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:45:26,682 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(124): Close 76b4d4e09416bc1f5ddf2d01512c9b63 2024-12-16T19:45:26,682 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:45:26,682 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1681): Closing 76b4d4e09416bc1f5ddf2d01512c9b63, disabling compactions & flushes 2024-12-16T19:45:26,682 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. 
2024-12-16T19:45:26,682 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. 2024-12-16T19:45:26,682 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. after waiting 0 ms 2024-12-16T19:45:26,682 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. 2024-12-16T19:45:26,698 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-16T19:45:26,700 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:45:26,700 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e. 2024-12-16T19:45:26,700 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1635): Region close journal for 229ba0f80f49b0aa2be2d6b7010de68e: 2024-12-16T19:45:26,702 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(170): Closed 229ba0f80f49b0aa2be2d6b7010de68e 2024-12-16T19:45:26,703 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=229ba0f80f49b0aa2be2d6b7010de68e, regionState=CLOSED 2024-12-16T19:45:26,707 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-16T19:45:26,708 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:45:26,708 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=53, resume processing ppid=52 2024-12-16T19:45:26,708 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63. 
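[Editor's note] The DisableTableProcedure, the region UNASSIGN transitions and the region closes above are the master/regionserver side of the single client request logged as "Client=jenkins//172.17.0.2 disable testExportWithResetTtl". A minimal, hypothetical client-side sketch of that call, assuming an HBase 2.x client with hbase-site.xml on the classpath; it is not taken from the test source, and only the table name matches the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableExample {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testExportWithResetTtl");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (!admin.isTableDisabled(tn)) {
            // Submits a DisableTableProcedure on the master and blocks until it finishes,
            // which corresponds to the repeated "Checking to see if procedure is done pid=49"
            // polling visible in the surrounding records.
            admin.disableTable(tn);
          }
        }
      }
    }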
2024-12-16T19:45:26,708 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, ppid=52, state=SUCCESS; CloseRegionProcedure 229ba0f80f49b0aa2be2d6b7010de68e, server=7a6e4bf70377,42861,1734378237024 in 184 msec 2024-12-16T19:45:26,708 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1635): Region close journal for 76b4d4e09416bc1f5ddf2d01512c9b63: 2024-12-16T19:45:26,710 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(170): Closed 76b4d4e09416bc1f5ddf2d01512c9b63 2024-12-16T19:45:26,711 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=229ba0f80f49b0aa2be2d6b7010de68e, UNASSIGN in 193 msec 2024-12-16T19:45:26,711 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=76b4d4e09416bc1f5ddf2d01512c9b63, regionState=CLOSED 2024-12-16T19:45:26,722 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=51 2024-12-16T19:45:26,722 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=51, state=SUCCESS; CloseRegionProcedure 76b4d4e09416bc1f5ddf2d01512c9b63, server=7a6e4bf70377,45217,1734378236605 in 196 msec 2024-12-16T19:45:26,727 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=51, resume processing ppid=50 2024-12-16T19:45:26,727 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=76b4d4e09416bc1f5ddf2d01512c9b63, UNASSIGN in 207 msec 2024-12-16T19:45:26,731 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-16T19:45:26,731 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; CloseTableRegionsProcedure table=testExportWithResetTtl in 214 msec 2024-12-16T19:45:26,734 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378326734"}]},"ts":"1734378326734"} 2024-12-16T19:45:26,737 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-16T19:45:26,752 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-16T19:45:26,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-16T19:45:26,762 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; DisableTableProcedure table=testExportWithResetTtl in 305 msec 2024-12-16T19:45:27,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-16T19:45:27,062 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testExportWithResetTtl, procId: 49 completed 2024-12-16T19:45:27,063 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-16T19:45:27,064 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testExportWithResetTtl 2024-12-16T19:45:27,066 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-16T19:45:27,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] access.PermissionStorage(259): Removing permissions of removed table testExportWithResetTtl 2024-12-16T19:45:27,067 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=55, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-16T19:45:27,069 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-16T19:45:27,075 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63 2024-12-16T19:45:27,076 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e 2024-12-16T19:45:27,083 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e/recovered.edits] 2024-12-16T19:45:27,091 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63/recovered.edits] 2024-12-16T19:45:27,098 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e/cf/49567492d33a43dba1a4119a71616352 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e/cf/49567492d33a43dba1a4119a71616352 2024-12-16T19:45:27,118 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63/cf/e83491e899ba4ed6976403da5ada7f8d to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63/cf/e83491e899ba4ed6976403da5ada7f8d 2024-12-16T19:45:27,136 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e/recovered.edits/8.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e/recovered.edits/8.seqid 2024-12-16T19:45:27,137 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/229ba0f80f49b0aa2be2d6b7010de68e 2024-12-16T19:45:27,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-16T19:45:27,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-16T19:45:27,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-16T19:45:27,143 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-16T19:45:27,146 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-16T19:45:27,146 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-16T19:45:27,146 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-16T19:45:27,146 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63/recovered.edits/8.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63/recovered.edits/8.seqid 2024-12-16T19:45:27,148 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportWithResetTtl/76b4d4e09416bc1f5ddf2d01512c9b63 2024-12-16T19:45:27,149 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-16T19:45:27,150 DEBUG [PEWorker-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-12-16T19:45:27,151 DEBUG [PEWorker-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf] 
2024-12-16T19:45:27,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-16T19:45:27,157 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:27,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-16T19:45:27,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:27,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:27,158 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data null 2024-12-16T19:45:27,158 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-16T19:45:27,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-16T19:45:27,159 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:27,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-16T19:45:27,160 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T19:45:27,161 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T19:45:27,161 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T19:45:27,161 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-16T19:45:27,166 DEBUG [HFileArchiver-15 
{}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241216ebb93fb74f334a708ec2d4c61f5bdf4c_229ba0f80f49b0aa2be2d6b7010de68e to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241216ebb93fb74f334a708ec2d4c61f5bdf4c_229ba0f80f49b0aa2be2d6b7010de68e 2024-12-16T19:45:27,172 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e2024121638be028fc56741088a0b08bd048d7f37_76b4d4e09416bc1f5ddf2d01512c9b63 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e2024121638be028fc56741088a0b08bd048d7f37_76b4d4e09416bc1f5ddf2d01512c9b63 2024-12-16T19:45:27,173 DEBUG [PEWorker-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-12-16T19:45:27,178 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=55, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-16T19:45:27,193 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-16T19:45:27,202 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testExportWithResetTtl' descriptor. 2024-12-16T19:45:27,216 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=55, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-16T19:45:27,216 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testExportWithResetTtl' from region states. 
2024-12-16T19:45:27,216 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378327216"}]},"ts":"9223372036854775807"} 2024-12-16T19:45:27,216 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378327216"}]},"ts":"9223372036854775807"} 2024-12-16T19:45:27,235 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-16T19:45:27,235 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 76b4d4e09416bc1f5ddf2d01512c9b63, NAME => 'testExportWithResetTtl,,1734378293627.76b4d4e09416bc1f5ddf2d01512c9b63.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 229ba0f80f49b0aa2be2d6b7010de68e, NAME => 'testExportWithResetTtl,1,1734378293627.229ba0f80f49b0aa2be2d6b7010de68e.', STARTKEY => '1', ENDKEY => ''}] 2024-12-16T19:45:27,236 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testExportWithResetTtl' as deleted. 2024-12-16T19:45:27,236 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734378327236"}]},"ts":"9223372036854775807"} 2024-12-16T19:45:27,242 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testExportWithResetTtl state from META 2024-12-16T19:45:27,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-16T19:45:27,263 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=55, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-16T19:45:27,282 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; DeleteTableProcedure table=testExportWithResetTtl in 200 msec 2024-12-16T19:45:27,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-16T19:45:27,464 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testExportWithResetTtl, procId: 55 completed 2024-12-16T19:45:27,465 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithResetTtl 2024-12-16T19:45:27,465 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-16T19:45:27,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-16T19:45:27,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-16T19:45:27,482 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378327482"}]},"ts":"1734378327482"} 2024-12-16T19:45:27,488 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): 
Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-16T19:45:27,505 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-16T19:45:27,518 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-16T19:45:27,530 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2eafc8c30dc3068090fca07c947c39fa, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c9c072b54498c6df337346164c585ad9, UNASSIGN}] 2024-12-16T19:45:27,537 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c9c072b54498c6df337346164c585ad9, UNASSIGN 2024-12-16T19:45:27,538 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2eafc8c30dc3068090fca07c947c39fa, UNASSIGN 2024-12-16T19:45:27,547 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=2eafc8c30dc3068090fca07c947c39fa, regionState=CLOSING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:45:27,547 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=c9c072b54498c6df337346164c585ad9, regionState=CLOSING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:45:27,570 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:45:27,570 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=58, state=RUNNABLE; CloseRegionProcedure 2eafc8c30dc3068090fca07c947c39fa, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:45:27,574 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:45:27,574 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE; CloseRegionProcedure c9c072b54498c6df337346164c585ad9, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:45:27,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-16T19:45:27,728 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:45:27,729 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(124): Close 2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:45:27,729 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:45:27,729 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] 
regionserver.HRegion(1681): Closing 2eafc8c30dc3068090fca07c947c39fa, disabling compactions & flushes 2024-12-16T19:45:27,729 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. 2024-12-16T19:45:27,729 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. 2024-12-16T19:45:27,729 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. after waiting 0 ms 2024-12-16T19:45:27,729 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. 2024-12-16T19:45:27,752 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:45:27,753 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(124): Close c9c072b54498c6df337346164c585ad9 2024-12-16T19:45:27,753 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:45:27,753 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1681): Closing c9c072b54498c6df337346164c585ad9, disabling compactions & flushes 2024-12-16T19:45:27,753 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. 2024-12-16T19:45:27,753 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. 2024-12-16T19:45:27,753 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. after waiting 0 ms 2024-12-16T19:45:27,753 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. 
2024-12-16T19:45:27,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-16T19:45:28,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-16T19:45:28,230 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_814444273_22 at /127.0.0.1:57444 [Receiving block BP-1238680177-172.17.0.2-1734378229927:blk_1073741834_1010] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 500ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data4/, blockId=1073741834, seqno=12 2024-12-16T19:45:28,230 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_814444273_22 at /127.0.0.1:53240 [Receiving block BP-1238680177-172.17.0.2-1734378229927:blk_1073741834_1010] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 500ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data6/, blockId=1073741834, seqno=12 2024-12-16T19:45:28,230 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_814444273_22 at /127.0.0.1:40606 [Receiving block BP-1238680177-172.17.0.2-1734378229927:blk_1073741834_1010] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 500ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data2/, blockId=1073741834, seqno=12 2024-12-16T19:45:28,230 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1732422252_22 at /127.0.0.1:53250 [Receiving block BP-1238680177-172.17.0.2-1734378229927:blk_1073741835_1011] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 476ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data5/, blockId=1073741835, seqno=11 2024-12-16T19:45:28,230 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1732422252_22 at /127.0.0.1:40622 [Receiving block BP-1238680177-172.17.0.2-1734378229927:blk_1073741835_1011] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 476ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data1/, blockId=1073741835, seqno=11 2024-12-16T19:45:28,314 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1732422252_22 at /127.0.0.1:57460 [Receiving block BP-1238680177-172.17.0.2-1734378229927:blk_1073741835_1011] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 560ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data3/, blockId=1073741835, seqno=11 2024-12-16T19:45:28,314 INFO 
[AsyncFSWAL-0-hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c-prefix:7a6e4bf70377,42861,1734378237024 {}] wal.AbstractFSWAL(1183): Slow sync cost: 583 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40033,DS-496d9931-f607-4803-8fbb-64e05aba6f02,DISK], DatanodeInfoWithStorage[127.0.0.1:39241,DS-4fb865bf-4391-49ae-b31e-3b62fb3795cf,DISK], DatanodeInfoWithStorage[127.0.0.1:43893,DS-a5be4d6e-06b0-4d3e-af01-bc15c82cbee1,DISK]] 2024-12-16T19:45:28,315 INFO [AsyncFSWAL-0-hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c-prefix:7a6e4bf70377,42741,1734378236851 {}] wal.AbstractFSWAL(1183): Slow sync cost: 560 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40033,DS-496d9931-f607-4803-8fbb-64e05aba6f02,DISK], DatanodeInfoWithStorage[127.0.0.1:39241,DS-4fb865bf-4391-49ae-b31e-3b62fb3795cf,DISK], DatanodeInfoWithStorage[127.0.0.1:43893,DS-a5be4d6e-06b0-4d3e-af01-bc15c82cbee1,DISK]] 2024-12-16T19:45:28,322 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:45:28,324 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:45:28,324 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:45:28,324 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa. 2024-12-16T19:45:28,324 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1635): Region close journal for 2eafc8c30dc3068090fca07c947c39fa: 2024-12-16T19:45:28,325 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:45:28,325 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9. 
2024-12-16T19:45:28,325 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1635): Region close journal for c9c072b54498c6df337346164c585ad9: 2024-12-16T19:45:28,327 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(170): Closed 2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:45:28,328 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=2eafc8c30dc3068090fca07c947c39fa, regionState=CLOSED 2024-12-16T19:45:28,328 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(170): Closed c9c072b54498c6df337346164c585ad9 2024-12-16T19:45:28,329 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=c9c072b54498c6df337346164c585ad9, regionState=CLOSED 2024-12-16T19:45:28,336 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=58 2024-12-16T19:45:28,338 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=59 2024-12-16T19:45:28,338 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=59, state=SUCCESS; CloseRegionProcedure c9c072b54498c6df337346164c585ad9, server=7a6e4bf70377,42741,1734378236851 in 760 msec 2024-12-16T19:45:28,340 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=58, state=SUCCESS; CloseRegionProcedure 2eafc8c30dc3068090fca07c947c39fa, server=7a6e4bf70377,42861,1734378237024 in 762 msec 2024-12-16T19:45:28,340 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2eafc8c30dc3068090fca07c947c39fa, UNASSIGN in 807 msec 2024-12-16T19:45:28,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=59, resume processing ppid=57 2024-12-16T19:45:28,342 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=c9c072b54498c6df337346164c585ad9, UNASSIGN in 808 msec 2024-12-16T19:45:28,349 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=57, resume processing ppid=56 2024-12-16T19:45:28,349 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, ppid=56, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 828 msec 2024-12-16T19:45:28,352 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378328352"}]},"ts":"1734378328352"} 2024-12-16T19:45:28,354 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-16T19:45:28,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-16T19:45:28,711 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-16T19:45:28,775 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithResetTtl in 1.3060 sec 2024-12-16T19:45:29,608 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-16T19:45:29,609 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl, procId: 56 completed 2024-12-16T19:45:29,610 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-16T19:45:29,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-16T19:45:29,613 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-16T19:45:29,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-16T19:45:29,616 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-16T19:45:29,616 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-16T19:45:29,623 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:45:29,623 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9 2024-12-16T19:45:29,626 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa/recovered.edits] 2024-12-16T19:45:29,627 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9/recovered.edits] 2024-12-16T19:45:29,631 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa/cf/bb5be642aa384501b3951a3cb616c963 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa/cf/bb5be642aa384501b3951a3cb616c963 2024-12-16T19:45:29,631 DEBUG 
[HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9/cf/64a0c82ba82a44f1aef440297182fa49 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9/cf/64a0c82ba82a44f1aef440297182fa49 2024-12-16T19:45:29,638 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9/recovered.edits/9.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9/recovered.edits/9.seqid 2024-12-16T19:45:29,638 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa/recovered.edits/9.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa/recovered.edits/9.seqid 2024-12-16T19:45:29,639 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:45:29,640 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithResetTtl/c9c072b54498c6df337346164c585ad9 2024-12-16T19:45:29,640 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-16T19:45:29,640 DEBUG [PEWorker-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-12-16T19:45:29,643 DEBUG [PEWorker-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf] 2024-12-16T19:45:29,650 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e2024121609c512ec91664d96ab39d423b66597c1_2eafc8c30dc3068090fca07c947c39fa to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e2024121609c512ec91664d96ab39d423b66597c1_2eafc8c30dc3068090fca07c947c39fa 2024-12-16T19:45:29,650 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b2024121670deabd229864915b7b34f996eb6950c_c9c072b54498c6df337346164c585ad9 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b2024121670deabd229864915b7b34f996eb6950c_c9c072b54498c6df337346164c585ad9 2024-12-16T19:45:29,651 DEBUG [PEWorker-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-12-16T19:45:29,654 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-16T19:45:29,656 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-16T19:45:29,659 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-12-16T19:45:29,661 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-16T19:45:29,661 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithResetTtl' from region states. 2024-12-16T19:45:29,661 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378329661"}]},"ts":"9223372036854775807"} 2024-12-16T19:45:29,661 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378329661"}]},"ts":"9223372036854775807"} 2024-12-16T19:45:29,663 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-16T19:45:29,664 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 2eafc8c30dc3068090fca07c947c39fa, NAME => 'testtb-testExportWithResetTtl,,1734378287726.2eafc8c30dc3068090fca07c947c39fa.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => c9c072b54498c6df337346164c585ad9, NAME => 'testtb-testExportWithResetTtl,1,1734378287726.c9c072b54498c6df337346164c585ad9.', STARTKEY => '1', ENDKEY => ''}] 2024-12-16T19:45:29,664 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithResetTtl' as deleted. 
2024-12-16T19:45:29,664 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734378329664"}]},"ts":"9223372036854775807"} 2024-12-16T19:45:29,666 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithResetTtl state from META 2024-12-16T19:45:29,938 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-16T19:45:29,939 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithResetTtl in 328 msec 2024-12-16T19:45:29,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T19:45:29,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T19:45:29,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T19:45:29,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T19:45:29,966 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-16T19:45:29,966 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-16T19:45:29,966 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-16T19:45:29,966 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-16T19:45:29,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T19:45:29,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T19:45:29,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T19:45:29,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:29,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:29,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:29,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-16T19:45:29,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:29,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-16T19:45:29,986 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl, procId: 62 completed 2024-12-16T19:45:29,996 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" 2024-12-16T19:45:30,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-16T19:45:30,002 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" 2024-12-16T19:45:30,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-16T19:45:30,006 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" 2024-12-16T19:45:30,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-16T19:45:30,029 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=778 (was 777) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:37370 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 58821) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38489 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40647 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1422999152) connection to localhost/127.0.0.1:40647 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2262 
java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:35296 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:39684 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=803 (was 811), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1110 (was 1127), ProcessCount=17 (was 17), AvailableMemoryMB=3035 (was 2665) - AvailableMemoryMB LEAK? - 2024-12-16T19:45:30,029 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=778 is superior to 500 2024-12-16T19:45:30,051 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=778, OpenFileDescriptor=803, MaxFileDescriptor=1048576, SystemLoadAverage=1110, ProcessCount=17, AvailableMemoryMB=3033 2024-12-16T19:45:30,051 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=778 is superior to 500 2024-12-16T19:45:30,053 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T19:45:30,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-16T19:45:30,055 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T19:45:30,055 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 63 2024-12-16T19:45:30,056 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T19:45:30,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=63 2024-12-16T19:45:30,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741976_1152 (size=443) 2024-12-16T19:45:30,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741976_1152 (size=443) 2024-12-16T19:45:30,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741976_1152 (size=443) 2024-12-16T19:45:30,070 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6e64496113249683a5df78b85fbdb410, NAME => 'testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:45:30,074 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 6f7c9d58bb51971a888d867212624f60, NAME => 'testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:45:30,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741977_1153 (size=68) 2024-12-16T19:45:30,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741977_1153 (size=68) 2024-12-16T19:45:30,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741977_1153 (size=68) 2024-12-16T19:45:30,093 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:45:30,093 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing 6e64496113249683a5df78b85fbdb410, disabling compactions & flushes 2024-12-16T19:45:30,094 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region 
testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. 2024-12-16T19:45:30,094 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. 2024-12-16T19:45:30,094 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. after waiting 0 ms 2024-12-16T19:45:30,094 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. 2024-12-16T19:45:30,094 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. 2024-12-16T19:45:30,094 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6e64496113249683a5df78b85fbdb410: 2024-12-16T19:45:30,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741978_1154 (size=68) 2024-12-16T19:45:30,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741978_1154 (size=68) 2024-12-16T19:45:30,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741978_1154 (size=68) 2024-12-16T19:45:30,099 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:45:30,099 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing 6f7c9d58bb51971a888d867212624f60, disabling compactions & flushes 2024-12-16T19:45:30,100 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. 2024-12-16T19:45:30,100 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. 2024-12-16T19:45:30,100 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. after waiting 0 ms 2024-12-16T19:45:30,100 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. 2024-12-16T19:45:30,100 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. 
2024-12-16T19:45:30,100 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for 6f7c9d58bb51971a888d867212624f60: 2024-12-16T19:45:30,101 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T19:45:30,101 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734378330101"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378330101"}]},"ts":"1734378330101"} 2024-12-16T19:45:30,101 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734378330101"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378330101"}]},"ts":"1734378330101"} 2024-12-16T19:45:30,104 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T19:45:30,105 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T19:45:30,105 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378330105"}]},"ts":"1734378330105"} 2024-12-16T19:45:30,107 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-16T19:45:30,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-16T19:45:30,207 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {7a6e4bf70377=0} racks are {/default-rack=0} 2024-12-16T19:45:30,208 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T19:45:30,208 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T19:45:30,208 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T19:45:30,209 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T19:45:30,209 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T19:45:30,209 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T19:45:30,209 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T19:45:30,209 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6e64496113249683a5df78b85fbdb410, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6f7c9d58bb51971a888d867212624f60, ASSIGN}] 2024-12-16T19:45:30,211 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6e64496113249683a5df78b85fbdb410, ASSIGN 2024-12-16T19:45:30,211 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6f7c9d58bb51971a888d867212624f60, ASSIGN 2024-12-16T19:45:30,212 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6e64496113249683a5df78b85fbdb410, ASSIGN; state=OFFLINE, location=7a6e4bf70377,45217,1734378236605; forceNewPlan=false, retain=false 2024-12-16T19:45:30,212 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6f7c9d58bb51971a888d867212624f60, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42861,1734378237024; forceNewPlan=false, retain=false 2024-12-16T19:45:30,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-16T19:45:30,362 INFO [7a6e4bf70377:45883 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T19:45:30,363 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=6e64496113249683a5df78b85fbdb410, regionState=OPENING, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:45:30,363 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=6f7c9d58bb51971a888d867212624f60, regionState=OPENING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:45:30,367 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=64, state=RUNNABLE; OpenRegionProcedure 6e64496113249683a5df78b85fbdb410, server=7a6e4bf70377,45217,1734378236605}] 2024-12-16T19:45:30,373 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=65, state=RUNNABLE; OpenRegionProcedure 6f7c9d58bb51971a888d867212624f60, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:45:30,524 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:45:30,530 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:45:30,530 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. 
2024-12-16T19:45:30,530 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => 6e64496113249683a5df78b85fbdb410, NAME => 'testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T19:45:30,531 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. service=AccessControlService 2024-12-16T19:45:30,531 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:45:30,531 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:30,531 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:45:30,531 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for 6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:30,531 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7330): checking classloading for 6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:30,537 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. 2024-12-16T19:45:30,537 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7285): Opening region: {ENCODED => 6f7c9d58bb51971a888d867212624f60, NAME => 'testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T19:45:30,537 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. service=AccessControlService 2024-12-16T19:45:30,538 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-16T19:45:30,538 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:30,538 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:45:30,538 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7327): checking encryption for 6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:30,538 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7330): checking classloading for 6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:30,545 INFO [StoreOpener-6e64496113249683a5df78b85fbdb410-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:30,560 INFO [StoreOpener-6f7c9d58bb51971a888d867212624f60-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:30,562 INFO [StoreOpener-6e64496113249683a5df78b85fbdb410-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6e64496113249683a5df78b85fbdb410 columnFamilyName cf 2024-12-16T19:45:30,563 INFO [StoreOpener-6f7c9d58bb51971a888d867212624f60-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6f7c9d58bb51971a888d867212624f60 columnFamilyName cf 2024-12-16T19:45:30,566 DEBUG [StoreOpener-6e64496113249683a5df78b85fbdb410-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:45:30,567 INFO [StoreOpener-6e64496113249683a5df78b85fbdb410-1 {}] regionserver.HStore(327): Store=6e64496113249683a5df78b85fbdb410/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:45:30,568 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:30,569 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:30,572 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for 6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:30,575 DEBUG [StoreOpener-6f7c9d58bb51971a888d867212624f60-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:45:30,577 INFO [StoreOpener-6f7c9d58bb51971a888d867212624f60-1 {}] regionserver.HStore(327): Store=6f7c9d58bb51971a888d867212624f60/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:45:30,579 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:30,580 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:30,583 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1085): writing seq id for 6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:30,587 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:45:30,589 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened 6e64496113249683a5df78b85fbdb410; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65334522, jitterRate=-0.026439756155014038}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:45:30,590 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 
{event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1001): Region open journal for 6e64496113249683a5df78b85fbdb410: 2024-12-16T19:45:30,591 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410., pid=66, masterSystemTime=1734378330524 2024-12-16T19:45:30,594 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. 2024-12-16T19:45:30,594 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. 2024-12-16T19:45:30,595 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=6e64496113249683a5df78b85fbdb410, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:45:30,611 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:45:30,612 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1102): Opened 6f7c9d58bb51971a888d867212624f60; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70765147, jitterRate=0.05448286235332489}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:45:30,612 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1001): Region open journal for 6f7c9d58bb51971a888d867212624f60: 2024-12-16T19:45:30,616 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=64 2024-12-16T19:45:30,618 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60., pid=67, masterSystemTime=1734378330529 2024-12-16T19:45:30,627 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=64, state=SUCCESS; OpenRegionProcedure 6e64496113249683a5df78b85fbdb410, server=7a6e4bf70377,45217,1734378236605 in 232 msec 2024-12-16T19:45:30,628 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. 2024-12-16T19:45:30,628 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. 
2024-12-16T19:45:30,635 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6e64496113249683a5df78b85fbdb410, ASSIGN in 407 msec 2024-12-16T19:45:30,635 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=6f7c9d58bb51971a888d867212624f60, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:45:30,653 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=65 2024-12-16T19:45:30,654 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=65, state=SUCCESS; OpenRegionProcedure 6f7c9d58bb51971a888d867212624f60, server=7a6e4bf70377,42861,1734378237024 in 274 msec 2024-12-16T19:45:30,657 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=63 2024-12-16T19:45:30,657 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6f7c9d58bb51971a888d867212624f60, ASSIGN in 445 msec 2024-12-16T19:45:30,659 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T19:45:30,659 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378330659"}]},"ts":"1734378330659"} 2024-12-16T19:45:30,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-16T19:45:30,668 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-16T19:45:30,687 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T19:45:30,687 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-16T19:45:30,695 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-16T19:45:30,729 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0002_000001 (auth:SIMPLE) from 127.0.0.1:38664 2024-12-16T19:45:30,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:30,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:30,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:30,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:30,823 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-16T19:45:30,823 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-16T19:45:30,823 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-16T19:45:30,823 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-16T19:45:30,824 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-16T19:45:30,836 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemState in 777 msec 2024-12-16T19:45:31,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-16T19:45:31,168 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState, procId: 63 completed 2024-12-16T19:45:31,172 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemState 2024-12-16T19:45:31,173 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. 2024-12-16T19:45:31,173 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:45:31,262 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-16T19:45:31,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378331262 (current time:1734378331262). 
2024-12-16T19:45:31,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:45:31,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-16T19:45:31,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:45:31,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c1f9c56 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4970719f 2024-12-16T19:45:31,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27dc2c7e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:45:31,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:45:31,284 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34468, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:45:31,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c1f9c56 to 127.0.0.1:62515 2024-12-16T19:45:31,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:45:31,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26e9e01b to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4e6f2b8f 2024-12-16T19:45:31,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@89651e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:45:31,335 DEBUG [hconnection-0x372f93b4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:45:31,338 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34478, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:45:31,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:45:31,346 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52672, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:45:31,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x26e9e01b to 127.0.0.1:62515 2024-12-16T19:45:31,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:45:31,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-16T19:45:31,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T19:45:31,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-16T19:45:31,354 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:45:31,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-16T19:45:31,355 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:45:31,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-16T19:45:31,359 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:45:31,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741979_1155 (size=170) 2024-12-16T19:45:31,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741979_1155 (size=170) 2024-12-16T19:45:31,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741979_1155 (size=170) 2024-12-16T19:45:31,427 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:45:31,427 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 6e64496113249683a5df78b85fbdb410}, {pid=70, ppid=68, state=RUNNABLE; 
SnapshotRegionProcedure 6f7c9d58bb51971a888d867212624f60}] 2024-12-16T19:45:31,429 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:31,429 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:31,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-16T19:45:31,580 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:45:31,580 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:45:31,581 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45217 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-16T19:45:31,583 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-16T19:45:31,586 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. 2024-12-16T19:45:31,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 6f7c9d58bb51971a888d867212624f60: 2024-12-16T19:45:31,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. for emptySnaptb0-testExportFileSystemState completed. 2024-12-16T19:45:31,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-16T19:45:31,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:45:31,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T19:45:31,590 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. 
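The lines above trace the master-side SnapshotProcedure (pid=68) for the FLUSH-type snapshot emptySnaptb0-testExportFileSystemState: the request is stored, the state machine walks SNAPSHOT_PREPARE through SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, and one SnapshotRegionProcedure subprocedure is dispatched per region. A minimal client-side sketch that would trigger this flow (not the test's actual code, which goes through HBaseTestingUtility helpers) looks like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master finishes the SnapshotProcedure; the client-side
      // polling is what produces the repeated
      // "Checking to see if procedure is done pid=68" lines in the log.
      admin.snapshot("emptySnaptb0-testExportFileSystemState",
          TableName.valueOf("testtb-testExportFileSystemState"),
          SnapshotType.FLUSH);
    }
  }
}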
2024-12-16T19:45:31,591 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2538): Flush status journal for 6e64496113249683a5df78b85fbdb410: 2024-12-16T19:45:31,591 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. for emptySnaptb0-testExportFileSystemState completed. 2024-12-16T19:45:31,591 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-16T19:45:31,591 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:45:31,591 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T19:45:31,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-16T19:45:31,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741981_1157 (size=71) 2024-12-16T19:45:31,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741981_1157 (size=71) 2024-12-16T19:45:31,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741981_1157 (size=71) 2024-12-16T19:45:31,714 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. 
2024-12-16T19:45:31,714 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-16T19:45:31,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=69 2024-12-16T19:45:31,715 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:31,715 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:31,718 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; SnapshotRegionProcedure 6e64496113249683a5df78b85fbdb410 in 289 msec 2024-12-16T19:45:31,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741980_1156 (size=71) 2024-12-16T19:45:31,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741980_1156 (size=71) 2024-12-16T19:45:31,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741980_1156 (size=71) 2024-12-16T19:45:31,784 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. 2024-12-16T19:45:31,784 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-16T19:45:31,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-16T19:45:31,785 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:31,785 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:31,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=68 2024-12-16T19:45:31,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=68, state=SUCCESS; SnapshotRegionProcedure 6f7c9d58bb51971a888d867212624f60 in 365 msec 2024-12-16T19:45:31,824 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:45:31,825 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState 
table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:45:31,848 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-16T19:45:31,849 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:45:31,849 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:45:31,850 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-16T19:45:31,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-16T19:45:31,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741982_1158 (size=63) 2024-12-16T19:45:31,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741982_1158 (size=63) 2024-12-16T19:45:31,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741982_1158 (size=63) 2024-12-16T19:45:31,974 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:45:31,974 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-16T19:45:31,976 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-16T19:45:32,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741983_1159 (size=653) 2024-12-16T19:45:32,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741983_1159 (size=653) 2024-12-16T19:45:32,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741983_1159 (size=653) 2024-12-16T19:45:32,140 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:45:32,193 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:45:32,194 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-16T19:45:32,196 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:45:32,196 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-16T19:45:32,202 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 845 msec 2024-12-16T19:45:32,309 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T19:45:32,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-16T19:45:32,478 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 68 completed 2024-12-16T19:45:32,483 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:45:32,510 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42861 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:45:32,527 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemState 2024-12-16T19:45:32,528 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. 2024-12-16T19:45:32,528 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:45:32,564 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-16T19:45:32,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378332564 (current time:1734378332564). 
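The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." warnings above are emitted when the test loads rows with durability set to skip the write-ahead log. A hedged sketch of such a write (row and value contents are illustrative, not the test's data):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WalSkippingLoader {
  // Writes one cell into family 'cf' without going through the WAL; the region
  // server then logs the "WAL disabled" warning seen above.
  static void putWithoutWal(Connection conn) throws IOException {
    try (Table table = conn.getTable(
        TableName.valueOf("testtb-testExportFileSystemState"))) {
      Put put = new Put(Bytes.toBytes("row-0"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}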
2024-12-16T19:45:32,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:45:32,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-16T19:45:32,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:45:32,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53087aef to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18f1e109 2024-12-16T19:45:32,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c59aab1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:45:32,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:45:32,646 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34492, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:45:32,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53087aef to 127.0.0.1:62515 2024-12-16T19:45:32,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:45:32,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1cf6c6cc to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@b1885f2 2024-12-16T19:45:32,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a65ba02, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:45:32,686 DEBUG [hconnection-0x3ecd6352-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:45:32,688 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34496, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:45:32,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:45:32,698 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52676, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:45:32,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x1cf6c6cc to 127.0.0.1:62515 2024-12-16T19:45:32,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:45:32,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-16T19:45:32,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T19:45:32,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-16T19:45:32,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-16T19:45:32,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-16T19:45:32,712 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:45:32,714 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:45:32,726 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:45:32,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-16T19:45:32,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741984_1160 (size=165) 2024-12-16T19:45:32,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741984_1160 (size=165) 2024-12-16T19:45:32,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741984_1160 (size=165) 2024-12-16T19:45:32,825 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:45:32,825 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, 
ppid=71, state=RUNNABLE; SnapshotRegionProcedure 6e64496113249683a5df78b85fbdb410}, {pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 6f7c9d58bb51971a888d867212624f60}] 2024-12-16T19:45:32,826 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:32,826 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:32,979 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:45:32,982 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:45:32,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45217 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-16T19:45:32,984 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-16T19:45:32,984 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. 2024-12-16T19:45:32,985 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2837): Flushing 6f7c9d58bb51971a888d867212624f60 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-16T19:45:32,986 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. 
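The "Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA]" line above comes from the AccessController coprocessor checking the snapshot request against the stored ACL. A grant like the following (a sketch assuming the AccessController coprocessor is enabled; not taken from the test source) is what puts such an RWXCA entry in place:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantRwxca {
  // Grants READ, WRITE, EXEC, CREATE and ADMIN ("RWXCA") on the test table to the
  // 'jenkins' user; family and qualifier are null, i.e. the grant covers the table.
  static void grant(Connection conn) throws Throwable {
    AccessControlClient.grant(conn,
        TableName.valueOf("testtb-testExportFileSystemState"),
        "jenkins", null, null,
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
  }
}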
2024-12-16T19:45:32,986 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 6e64496113249683a5df78b85fbdb410 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-16T19:45:33,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-16T19:45:33,025 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412168e89f2bcb57648af90597600c658051d_6e64496113249683a5df78b85fbdb410 is 71, key is 0b8a3ef98d0580443b43912b053083b7/cf:q/1734378332483/Put/seqid=0 2024-12-16T19:45:33,030 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241216739c41f607c042a8b88a5048ca1a0b06_6f7c9d58bb51971a888d867212624f60 is 71, key is 16ab1cb22cd479a8f36f7d7601594613/cf:q/1734378332509/Put/seqid=0 2024-12-16T19:45:33,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741985_1161 (size=5102) 2024-12-16T19:45:33,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741985_1161 (size=5102) 2024-12-16T19:45:33,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741985_1161 (size=5102) 2024-12-16T19:45:33,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:45:33,063 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412168e89f2bcb57648af90597600c658051d_6e64496113249683a5df78b85fbdb410 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202412168e89f2bcb57648af90597600c658051d_6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:33,064 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410/.tmp/cf/70c74b8402e44d439beee53f5a46f441, store: [table=testtb-testExportFileSystemState family=cf region=6e64496113249683a5df78b85fbdb410] 2024-12-16T19:45:33,065 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410/.tmp/cf/70c74b8402e44d439beee53f5a46f441 is 209, key is 0a3c5288c3a55e229a3eb0fc74ee1c0b3/cf:q/1734378332483/Put/seqid=0 2024-12-16T19:45:33,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741986_1162 (size=8172) 2024-12-16T19:45:33,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741986_1162 (size=8172) 2024-12-16T19:45:33,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741986_1162 (size=8172) 2024-12-16T19:45:33,094 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:45:33,109 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241216739c41f607c042a8b88a5048ca1a0b06_6f7c9d58bb51971a888d867212624f60 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241216739c41f607c042a8b88a5048ca1a0b06_6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:33,111 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60/.tmp/cf/266ae43da1f046489b93b3281771fb10, store: [table=testtb-testExportFileSystemState family=cf region=6f7c9d58bb51971a888d867212624f60] 2024-12-16T19:45:33,111 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60/.tmp/cf/266ae43da1f046489b93b3281771fb10 is 209, key is 130b2357afca4ca7b55029fddcf3abf45/cf:q/1734378332509/Put/seqid=0 2024-12-16T19:45:33,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741987_1163 (size=5918) 2024-12-16T19:45:33,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741987_1163 (size=5918) 2024-12-16T19:45:33,151 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410/.tmp/cf/70c74b8402e44d439beee53f5a46f441 2024-12-16T19:45:33,152 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741987_1163 (size=5918) 2024-12-16T19:45:33,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410/.tmp/cf/70c74b8402e44d439beee53f5a46f441 as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410/cf/70c74b8402e44d439beee53f5a46f441 2024-12-16T19:45:33,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741988_1164 (size=14999) 2024-12-16T19:45:33,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741988_1164 (size=14999) 2024-12-16T19:45:33,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741988_1164 (size=14999) 2024-12-16T19:45:33,178 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410/cf/70c74b8402e44d439beee53f5a46f441, entries=3, sequenceid=6, filesize=5.8 K 2024-12-16T19:45:33,179 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 6e64496113249683a5df78b85fbdb410 in 193ms, sequenceid=6, compaction requested=false 2024-12-16T19:45:33,179 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-16T19:45:33,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 6e64496113249683a5df78b85fbdb410: 2024-12-16T19:45:33,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. for snaptb0-testExportFileSystemState completed. 2024-12-16T19:45:33,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-16T19:45:33,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:45:33,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410/cf/70c74b8402e44d439beee53f5a46f441] hfiles 2024-12-16T19:45:33,180 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410/cf/70c74b8402e44d439beee53f5a46f441 for snapshot=snaptb0-testExportFileSystemState 2024-12-16T19:45:33,191 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60/.tmp/cf/266ae43da1f046489b93b3281771fb10 2024-12-16T19:45:33,218 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60/.tmp/cf/266ae43da1f046489b93b3281771fb10 as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60/cf/266ae43da1f046489b93b3281771fb10 2024-12-16T19:45:33,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741989_1165 (size=110) 2024-12-16T19:45:33,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741989_1165 (size=110) 2024-12-16T19:45:33,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741989_1165 (size=110) 2024-12-16T19:45:33,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. 
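The flush above goes through DefaultMobStoreFlusher and writes both a MOB file under mobdir/ and a regular store file under the region's cf/ directory, which means the test table's 'cf' family is MOB-enabled. A sketch of creating such a table (the threshold value is illustrative, not taken from the test):

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  // Creates a table whose 'cf' family stores its values as MOB files; flushes of
  // such a family produce the mobdir/.tmp and mobdir/data paths in the log above.
  static void create(Admin admin) throws IOException {
    admin.createTable(TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)   // route flushed cells into MOB store files
            .setMobThreshold(0L)   // illustrative: treat every cell as a MOB cell
            .build())
        .build());
  }
}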
2024-12-16T19:45:33,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-16T19:45:33,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-16T19:45:33,230 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:33,230 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:33,246 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; SnapshotRegionProcedure 6e64496113249683a5df78b85fbdb410 in 409 msec 2024-12-16T19:45:33,252 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60/cf/266ae43da1f046489b93b3281771fb10, entries=47, sequenceid=6, filesize=14.6 K 2024-12-16T19:45:33,259 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 6f7c9d58bb51971a888d867212624f60 in 274ms, sequenceid=6, compaction requested=false 2024-12-16T19:45:33,259 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2538): Flush status journal for 6f7c9d58bb51971a888d867212624f60: 2024-12-16T19:45:33,259 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. for snaptb0-testExportFileSystemState completed. 2024-12-16T19:45:33,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-16T19:45:33,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:45:33,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60/cf/266ae43da1f046489b93b3281771fb10] hfiles 2024-12-16T19:45:33,260 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60/cf/266ae43da1f046489b93b3281771fb10 for snapshot=snaptb0-testExportFileSystemState 2024-12-16T19:45:33,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-16T19:45:33,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741990_1166 (size=110) 2024-12-16T19:45:33,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741990_1166 (size=110) 2024-12-16T19:45:33,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741990_1166 (size=110) 2024-12-16T19:45:33,776 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. 
2024-12-16T19:45:33,776 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-16T19:45:33,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=73 2024-12-16T19:45:33,777 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:33,777 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:33,789 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=73, resume processing ppid=71 2024-12-16T19:45:33,789 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, ppid=71, state=SUCCESS; SnapshotRegionProcedure 6f7c9d58bb51971a888d867212624f60 in 960 msec 2024-12-16T19:45:33,789 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:45:33,791 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:45:33,809 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
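Once the SnapshotProcedure for snaptb0-testExportFileSystemState reaches SNAPSHOT_COMPLETE_SNAPSHOT a few lines below, both snapshots taken in this test are visible to clients. A small illustrative sketch of listing them:

import java.io.IOException;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListTestSnapshots {
  // Prints the snapshots whose names end in -testExportFileSystemState, e.g.
  // emptySnaptb0-testExportFileSystemState and snaptb0-testExportFileSystemState.
  static void list(Admin admin) throws IOException {
    for (SnapshotDescription sd :
        admin.listSnapshots(Pattern.compile(".*-testExportFileSystemState"))) {
      System.out.println(sd.getName());
    }
  }
}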
2024-12-16T19:45:33,809 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:45:33,809 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:45:33,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-16T19:45:33,820 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241216739c41f607c042a8b88a5048ca1a0b06_6f7c9d58bb51971a888d867212624f60, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202412168e89f2bcb57648af90597600c658051d_6e64496113249683a5df78b85fbdb410] hfiles 2024-12-16T19:45:33,820 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241216739c41f607c042a8b88a5048ca1a0b06_6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:33,820 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202412168e89f2bcb57648af90597600c658051d_6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:33,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741991_1167 (size=294) 2024-12-16T19:45:33,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741991_1167 (size=294) 2024-12-16T19:45:33,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741991_1167 (size=294) 2024-12-16T19:45:33,871 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:45:33,871 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-16T19:45:33,872 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-16T19:45:33,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741992_1168 (size=963) 2024-12-16T19:45:33,923 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741992_1168 (size=963) 2024-12-16T19:45:33,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741992_1168 (size=963) 2024-12-16T19:45:33,942 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:45:33,948 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:45:33,949 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-16T19:45:33,951 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:45:33,951 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-16T19:45:33,953 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 1.2470 sec 2024-12-16T19:45:34,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-16T19:45:34,822 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 71 completed 2024-12-16T19:45:34,822 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378334822 2024-12-16T19:45:34,822 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:38709, tgtDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378334822, rawTgtDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378334822, srcFsUri=hdfs://localhost:38709, srcDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:45:34,870 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:38709, 
inputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:45:34,870 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378334822, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378334822/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-16T19:45:34,873 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T19:45:34,901 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378334822/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-16T19:45:34,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741993_1169 (size=165) 2024-12-16T19:45:34,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741993_1169 (size=165) 2024-12-16T19:45:34,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741993_1169 (size=165) 2024-12-16T19:45:34,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741994_1170 (size=963) 2024-12-16T19:45:35,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741994_1170 (size=963) 2024-12-16T19:45:35,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741994_1170 (size=963) 2024-12-16T19:45:35,402 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T19:45:35,402 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T19:45:35,403 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T19:45:35,404 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T19:45:35,874 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_3/usercache/jenkins/appcache/application_1734378249643_0002/container_1734378249643_0002_01_000001/launch_container.sh] 2024-12-16T19:45:35,874 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_3/usercache/jenkins/appcache/application_1734378249643_0002/container_1734378249643_0002_01_000001/container_tokens] 2024-12-16T19:45:35,875 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_3/usercache/jenkins/appcache/application_1734378249643_0002/container_1734378249643_0002_01_000001/sysfs] 2024-12-16T19:45:36,042 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-16T19:45:36,042 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-16T19:45:36,043 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-16T19:45:36,043 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-16T19:45:36,575 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-17241521752538684988.jar 2024-12-16T19:45:36,575 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:45:36,576 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:45:36,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-7419895855511162224.jar 2024-12-16T19:45:36,663 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T19:45:36,663 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T19:45:36,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T19:45:36,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T19:45:36,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T19:45:36,664 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T19:45:36,665 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T19:45:36,665 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T19:45:36,665 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T19:45:36,666 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T19:45:36,666 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-16T19:45:36,666 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 
2024-12-16T19:45:36,667 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T19:45:36,667 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T19:45:36,667 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T19:45:36,668 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T19:45:36,668 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T19:45:36,668 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T19:45:36,669 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:45:36,669 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:45:36,669 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:45:36,670 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:45:36,670 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:45:36,670 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:45:36,670 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:45:36,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741995_1171 (size=127628) 2024-12-16T19:45:36,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741995_1171 (size=127628) 2024-12-16T19:45:36,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741995_1171 (size=127628) 2024-12-16T19:45:36,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741996_1172 (size=2172137) 2024-12-16T19:45:36,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741996_1172 (size=2172137) 2024-12-16T19:45:36,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741996_1172 (size=2172137) 2024-12-16T19:45:36,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741997_1173 (size=213228) 2024-12-16T19:45:36,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741997_1173 (size=213228) 2024-12-16T19:45:36,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741997_1173 (size=213228) 2024-12-16T19:45:36,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741998_1174 (size=1877034) 2024-12-16T19:45:36,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741998_1174 (size=1877034) 2024-12-16T19:45:36,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741998_1174 (size=1877034) 2024-12-16T19:45:36,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741999_1175 (size=533455) 2024-12-16T19:45:36,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741999_1175 (size=533455) 2024-12-16T19:45:36,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741999_1175 (size=533455) 2024-12-16T19:45:37,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742000_1176 (size=7280644) 2024-12-16T19:45:37,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742000_1176 (size=7280644) 
2024-12-16T19:45:37,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742000_1176 (size=7280644) 2024-12-16T19:45:37,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742001_1177 (size=4188619) 2024-12-16T19:45:37,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742001_1177 (size=4188619) 2024-12-16T19:45:37,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742001_1177 (size=4188619) 2024-12-16T19:45:37,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742002_1178 (size=6350922) 2024-12-16T19:45:37,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742002_1178 (size=6350922) 2024-12-16T19:45:37,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742002_1178 (size=6350922) 2024-12-16T19:45:37,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742003_1179 (size=20406) 2024-12-16T19:45:37,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742003_1179 (size=20406) 2024-12-16T19:45:37,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742003_1179 (size=20406) 2024-12-16T19:45:37,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742004_1180 (size=75495) 2024-12-16T19:45:37,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742004_1180 (size=75495) 2024-12-16T19:45:37,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742004_1180 (size=75495) 2024-12-16T19:45:37,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742005_1181 (size=45609) 2024-12-16T19:45:37,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742005_1181 (size=45609) 2024-12-16T19:45:37,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742005_1181 (size=45609) 2024-12-16T19:45:37,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742006_1182 (size=110084) 2024-12-16T19:45:37,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742006_1182 (size=110084) 2024-12-16T19:45:37,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742006_1182 (size=110084) 2024-12-16T19:45:37,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742007_1183 
(size=1323991) 2024-12-16T19:45:37,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742007_1183 (size=1323991) 2024-12-16T19:45:37,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742007_1183 (size=1323991) 2024-12-16T19:45:37,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742008_1184 (size=23076) 2024-12-16T19:45:37,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742008_1184 (size=23076) 2024-12-16T19:45:37,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742008_1184 (size=23076) 2024-12-16T19:45:37,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742009_1185 (size=126803) 2024-12-16T19:45:37,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742009_1185 (size=126803) 2024-12-16T19:45:37,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742009_1185 (size=126803) 2024-12-16T19:45:37,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742010_1186 (size=322274) 2024-12-16T19:45:37,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742010_1186 (size=322274) 2024-12-16T19:45:37,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742010_1186 (size=322274) 2024-12-16T19:45:37,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742011_1187 (size=1832290) 2024-12-16T19:45:37,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742011_1187 (size=1832290) 2024-12-16T19:45:37,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742011_1187 (size=1832290) 2024-12-16T19:45:37,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742012_1188 (size=451756) 2024-12-16T19:45:37,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742012_1188 (size=451756) 2024-12-16T19:45:37,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742012_1188 (size=451756) 2024-12-16T19:45:38,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742013_1189 (size=30081) 2024-12-16T19:45:38,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742013_1189 (size=30081) 2024-12-16T19:45:38,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to 
blk_1073742013_1189 (size=30081) 2024-12-16T19:45:38,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742014_1190 (size=53616) 2024-12-16T19:45:38,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742014_1190 (size=53616) 2024-12-16T19:45:38,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742014_1190 (size=53616) 2024-12-16T19:45:38,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742015_1191 (size=29229) 2024-12-16T19:45:38,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742015_1191 (size=29229) 2024-12-16T19:45:38,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742015_1191 (size=29229) 2024-12-16T19:45:38,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742016_1192 (size=169089) 2024-12-16T19:45:38,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742016_1192 (size=169089) 2024-12-16T19:45:38,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742016_1192 (size=169089) 2024-12-16T19:45:38,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742017_1193 (size=5175431) 2024-12-16T19:45:38,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742017_1193 (size=5175431) 2024-12-16T19:45:38,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742017_1193 (size=5175431) 2024-12-16T19:45:38,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742018_1194 (size=136454) 2024-12-16T19:45:38,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742018_1194 (size=136454) 2024-12-16T19:45:38,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742018_1194 (size=136454) 2024-12-16T19:45:39,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742019_1195 (size=907467) 2024-12-16T19:45:39,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742019_1195 (size=907467) 2024-12-16T19:45:39,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742019_1195 (size=907467) 2024-12-16T19:45:39,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742020_1196 (size=3317408) 2024-12-16T19:45:39,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added 
to blk_1073742020_1196 (size=3317408) 2024-12-16T19:45:39,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742020_1196 (size=3317408) 2024-12-16T19:45:39,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742021_1197 (size=503880) 2024-12-16T19:45:39,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742021_1197 (size=503880) 2024-12-16T19:45:39,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742021_1197 (size=503880) 2024-12-16T19:45:39,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742022_1198 (size=4695811) 2024-12-16T19:45:39,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742022_1198 (size=4695811) 2024-12-16T19:45:39,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742022_1198 (size=4695811) 2024-12-16T19:45:39,318 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-16T19:45:39,330 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-16T19:45:39,334 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=33.4 K 2024-12-16T19:45:39,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742023_1199 (size=726) 2024-12-16T19:45:39,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742023_1199 (size=726) 2024-12-16T19:45:39,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742023_1199 (size=726) 2024-12-16T19:45:39,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742024_1200 (size=15) 2024-12-16T19:45:39,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742024_1200 (size=15) 2024-12-16T19:45:39,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742024_1200 (size=15) 2024-12-16T19:45:39,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742025_1201 (size=304887) 2024-12-16T19:45:39,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742025_1201 (size=304887) 2024-12-16T19:45:39,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742025_1201 (size=304887) 2024-12-16T19:45:39,657 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-16T19:45:39,657 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T19:45:39,747 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0003_000001 (auth:SIMPLE) from 127.0.0.1:36112 2024-12-16T19:45:41,545 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T19:45:47,414 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0003_000001 (auth:SIMPLE) from 127.0.0.1:38178 2024-12-16T19:45:47,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742026_1202 (size=350561) 2024-12-16T19:45:47,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742026_1202 (size=350561) 2024-12-16T19:45:47,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742026_1202 (size=350561) 2024-12-16T19:45:49,684 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0003_000001 (auth:SIMPLE) from 127.0.0.1:50860 2024-12-16T19:45:54,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742027_1203 (size=14999) 2024-12-16T19:45:54,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742027_1203 (size=14999) 2024-12-16T19:45:54,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742027_1203 (size=14999) 2024-12-16T19:45:54,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742028_1204 (size=8172) 2024-12-16T19:45:54,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742028_1204 (size=8172) 2024-12-16T19:45:54,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742028_1204 (size=8172) 2024-12-16T19:45:54,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742029_1205 (size=5918) 2024-12-16T19:45:54,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742029_1205 (size=5918) 2024-12-16T19:45:54,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742029_1205 (size=5918) 2024-12-16T19:45:54,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742030_1206 (size=5102) 2024-12-16T19:45:54,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742030_1206 
(size=5102) 2024-12-16T19:45:54,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742030_1206 (size=5102) 2024-12-16T19:45:54,255 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-16T19:45:54,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742031_1207 (size=17462) 2024-12-16T19:45:54,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742031_1207 (size=17462) 2024-12-16T19:45:54,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742031_1207 (size=17462) 2024-12-16T19:45:54,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742032_1208 (size=465) 2024-12-16T19:45:54,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742032_1208 (size=465) 2024-12-16T19:45:54,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742032_1208 (size=465) 2024-12-16T19:45:54,314 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_0/usercache/jenkins/appcache/application_1734378249643_0003/container_1734378249643_0003_01_000002/launch_container.sh] 2024-12-16T19:45:54,314 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_0/usercache/jenkins/appcache/application_1734378249643_0003/container_1734378249643_0003_01_000002/container_tokens] 2024-12-16T19:45:54,314 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_0/usercache/jenkins/appcache/application_1734378249643_0003/container_1734378249643_0003_01_000002/sysfs] 2024-12-16T19:45:54,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742033_1209 (size=17462) 2024-12-16T19:45:54,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742033_1209 (size=17462) 2024-12-16T19:45:54,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742033_1209 (size=17462) 
2024-12-16T19:45:54,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742034_1210 (size=350561) 2024-12-16T19:45:54,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742034_1210 (size=350561) 2024-12-16T19:45:54,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742034_1210 (size=350561) 2024-12-16T19:45:54,358 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0003_000001 (auth:SIMPLE) from 127.0.0.1:50866 2024-12-16T19:45:55,920 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-16T19:45:55,921 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-16T19:45:55,973 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemState 2024-12-16T19:45:55,974 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-16T19:45:55,976 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-16T19:45:55,976 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-16T19:45:55,977 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-16T19:45:55,977 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-16T19:45:55,977 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378334822/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378334822/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-16T19:45:55,978 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378334822/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-16T19:45:55,978 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378334822/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-16T19:45:56,017 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemState 2024-12-16T19:45:56,017 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] 
master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-16T19:45:56,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=74, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-16T19:45:56,022 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378356022"}]},"ts":"1734378356022"} 2024-12-16T19:45:56,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-16T19:45:56,025 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-16T19:45:56,059 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-16T19:45:56,061 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-16T19:45:56,064 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6e64496113249683a5df78b85fbdb410, UNASSIGN}, {pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6f7c9d58bb51971a888d867212624f60, UNASSIGN}] 2024-12-16T19:45:56,074 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6f7c9d58bb51971a888d867212624f60, UNASSIGN 2024-12-16T19:45:56,075 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6e64496113249683a5df78b85fbdb410, UNASSIGN 2024-12-16T19:45:56,076 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=6e64496113249683a5df78b85fbdb410, regionState=CLOSING, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:45:56,076 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=6f7c9d58bb51971a888d867212624f60, regionState=CLOSING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:45:56,078 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:45:56,078 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; CloseRegionProcedure 6f7c9d58bb51971a888d867212624f60, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:45:56,080 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:45:56,080 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=79, ppid=76, state=RUNNABLE; CloseRegionProcedure 6e64496113249683a5df78b85fbdb410, 
server=7a6e4bf70377,45217,1734378236605}] 2024-12-16T19:45:56,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-16T19:45:56,231 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:45:56,232 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(124): Close 6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:56,232 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:45:56,232 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1681): Closing 6f7c9d58bb51971a888d867212624f60, disabling compactions & flushes 2024-12-16T19:45:56,232 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:45:56,232 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. 2024-12-16T19:45:56,232 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. 2024-12-16T19:45:56,232 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. after waiting 0 ms 2024-12-16T19:45:56,232 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. 2024-12-16T19:45:56,233 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(124): Close 6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:56,233 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:45:56,233 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1681): Closing 6e64496113249683a5df78b85fbdb410, disabling compactions & flushes 2024-12-16T19:45:56,233 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. 2024-12-16T19:45:56,233 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. 2024-12-16T19:45:56,233 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. 
after waiting 0 ms 2024-12-16T19:45:56,233 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. 2024-12-16T19:45:56,237 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:45:56,237 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:45:56,238 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:45:56,238 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:45:56,238 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410. 2024-12-16T19:45:56,238 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60. 
2024-12-16T19:45:56,238 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1635): Region close journal for 6f7c9d58bb51971a888d867212624f60: 2024-12-16T19:45:56,238 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1635): Region close journal for 6e64496113249683a5df78b85fbdb410: 2024-12-16T19:45:56,239 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(170): Closed 6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:56,240 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=6f7c9d58bb51971a888d867212624f60, regionState=CLOSED 2024-12-16T19:45:56,240 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(170): Closed 6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:56,240 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=6e64496113249683a5df78b85fbdb410, regionState=CLOSED 2024-12-16T19:45:56,243 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-16T19:45:56,243 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; CloseRegionProcedure 6f7c9d58bb51971a888d867212624f60, server=7a6e4bf70377,42861,1734378237024 in 164 msec 2024-12-16T19:45:56,243 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=79, resume processing ppid=76 2024-12-16T19:45:56,245 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, ppid=76, state=SUCCESS; CloseRegionProcedure 6e64496113249683a5df78b85fbdb410, server=7a6e4bf70377,45217,1734378236605 in 162 msec 2024-12-16T19:45:56,245 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6f7c9d58bb51971a888d867212624f60, UNASSIGN in 179 msec 2024-12-16T19:45:56,246 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-16T19:45:56,246 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=6e64496113249683a5df78b85fbdb410, UNASSIGN in 181 msec 2024-12-16T19:45:56,248 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=75, resume processing ppid=74 2024-12-16T19:45:56,248 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, ppid=74, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 187 msec 2024-12-16T19:45:56,249 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378356248"}]},"ts":"1734378356248"} 2024-12-16T19:45:56,250 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-16T19:45:56,259 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-16T19:45:56,261 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemState in 
242 msec 2024-12-16T19:45:56,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-16T19:45:56,328 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState, procId: 74 completed 2024-12-16T19:45:56,329 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-16T19:45:56,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-16T19:45:56,330 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-16T19:45:56,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-16T19:45:56,331 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=80, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-16T19:45:56,332 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-16T19:45:56,334 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:56,334 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:56,336 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410/recovered.edits] 2024-12-16T19:45:56,336 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60/recovered.edits] 2024-12-16T19:45:56,339 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60/cf/266ae43da1f046489b93b3281771fb10 to 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60/cf/266ae43da1f046489b93b3281771fb10 2024-12-16T19:45:56,339 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410/cf/70c74b8402e44d439beee53f5a46f441 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410/cf/70c74b8402e44d439beee53f5a46f441 2024-12-16T19:45:56,341 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410/recovered.edits/9.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410/recovered.edits/9.seqid 2024-12-16T19:45:56,342 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60/recovered.edits/9.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60/recovered.edits/9.seqid 2024-12-16T19:45:56,342 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:56,342 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemState/6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:56,342 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-16T19:45:56,343 DEBUG [PEWorker-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-12-16T19:45:56,343 DEBUG [PEWorker-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf] 2024-12-16T19:45:56,347 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202412168e89f2bcb57648af90597600c658051d_6e64496113249683a5df78b85fbdb410 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202412168e89f2bcb57648af90597600c658051d_6e64496113249683a5df78b85fbdb410 2024-12-16T19:45:56,347 DEBUG 
[HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241216739c41f607c042a8b88a5048ca1a0b06_6f7c9d58bb51971a888d867212624f60 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b20241216739c41f607c042a8b88a5048ca1a0b06_6f7c9d58bb51971a888d867212624f60 2024-12-16T19:45:56,347 DEBUG [PEWorker-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-12-16T19:45:56,350 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=80, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-16T19:45:56,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-16T19:45:56,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-16T19:45:56,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-16T19:45:56,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-16T19:45:56,351 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-16T19:45:56,351 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-16T19:45:56,351 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-16T19:45:56,351 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-16T19:45:56,352 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-16T19:45:56,354 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemState' descriptor. 
2024-12-16T19:45:56,356 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=80, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-16T19:45:56,356 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemState' from region states. 2024-12-16T19:45:56,356 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378356356"}]},"ts":"9223372036854775807"} 2024-12-16T19:45:56,356 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378356356"}]},"ts":"9223372036854775807"} 2024-12-16T19:45:56,359 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-16T19:45:56,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-16T19:45:56,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:56,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-16T19:45:56,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-16T19:45:56,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:56,359 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:56,359 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 6e64496113249683a5df78b85fbdb410, NAME => 'testtb-testExportFileSystemState,,1734378330052.6e64496113249683a5df78b85fbdb410.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 6f7c9d58bb51971a888d867212624f60, NAME => 'testtb-testExportFileSystemState,1,1734378330052.6f7c9d58bb51971a888d867212624f60.', STARTKEY => '1', ENDKEY => ''}] 2024-12-16T19:45:56,360 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemState' as deleted. 
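The entries above are the server side of DeleteTableProcedure pid=80: store files and MOB files are moved under archive/, the two region rows and the table-state row are removed from hbase:meta, and the table's ACL znode is cleaned up. A minimal client-side sketch of the calls that drive this procedure and the snapshot deletions logged just below (class name and configuration are illustrative; table and snapshot names are taken from the log):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropExportTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportFileSystemState");
      if (admin.tableExists(tn)) {
        // A table must be disabled before DeleteTableProcedure will accept it.
        if (!admin.isTableDisabled(tn)) {
          admin.disableTable(tn);
        }
        // Blocks until the master reports DeleteTableProcedure (pid=80 here) as done.
        admin.deleteTable(tn);
      }
      // The two snapshot deletions that follow in the log.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testExportFileSystemState");
    }
  }
}
```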
2024-12-16T19:45:56,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-16T19:45:56,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:56,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80 2024-12-16T19:45:56,360 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734378356360"}]},"ts":"9223372036854775807"} 2024-12-16T19:45:56,365 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemState state from META 2024-12-16T19:45:56,374 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=80, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-16T19:45:56,375 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemState in 45 msec 2024-12-16T19:45:56,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80 2024-12-16T19:45:56,462 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState, procId: 80 completed 2024-12-16T19:45:56,471 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" 2024-12-16T19:45:56,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-16T19:45:56,476 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" 2024-12-16T19:45:56,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-16T19:45:56,505 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=781 (was 778) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:52364 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 62148) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:38998 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-875156200_1 at /127.0.0.1:33780 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:52208 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1422999152) connection to localhost/127.0.0.1:33833 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38137 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1422999152) connection to localhost/127.0.0.1:38137 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: hconnection-0x736823cc-shared-pool-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3064 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: hconnection-0x736823cc-shared-pool-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=804 (was 803) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1093 (was 1110), ProcessCount=17 (was 17), AvailableMemoryMB=2059 (was 3033) 2024-12-16T19:45:56,506 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=781 is superior to 500 2024-12-16T19:45:56,525 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=781, OpenFileDescriptor=804, MaxFileDescriptor=1048576, SystemLoadAverage=1093, ProcessCount=17, AvailableMemoryMB=2058 2024-12-16T19:45:56,525 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=781 is superior to 500 2024-12-16T19:45:56,527 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T19:45:56,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-16T19:45:56,529 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T19:45:56,530 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 81 2024-12-16T19:45:56,530 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T19:45:56,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-16T19:45:56,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742035_1211 (size=440) 2024-12-16T19:45:56,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742035_1211 (size=440) 2024-12-16T19:45:56,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742035_1211 (size=440) 2024-12-16T19:45:56,542 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 66fba5611c85f6777b8785e0032e7521, NAME => 'testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:45:56,542 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => a80c77746132b3c3234c95a6bb030b86, NAME => 'testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:45:56,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742036_1212 (size=65) 2024-12-16T19:45:56,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742036_1212 (size=65) 2024-12-16T19:45:56,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742036_1212 (size=65) 2024-12-16T19:45:56,571 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:45:56,572 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1681): Closing a80c77746132b3c3234c95a6bb030b86, disabling compactions & flushes 2024-12-16T19:45:56,572 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. 2024-12-16T19:45:56,572 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. 2024-12-16T19:45:56,572 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. after waiting 0 ms 2024-12-16T19:45:56,572 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. 2024-12-16T19:45:56,572 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. 
2024-12-16T19:45:56,572 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1635): Region close journal for a80c77746132b3c3234c95a6bb030b86: 2024-12-16T19:45:56,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742037_1213 (size=65) 2024-12-16T19:45:56,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742037_1213 (size=65) 2024-12-16T19:45:56,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742037_1213 (size=65) 2024-12-16T19:45:56,591 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:45:56,591 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1681): Closing 66fba5611c85f6777b8785e0032e7521, disabling compactions & flushes 2024-12-16T19:45:56,591 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. 2024-12-16T19:45:56,591 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. 2024-12-16T19:45:56,591 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. after waiting 0 ms 2024-12-16T19:45:56,591 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. 2024-12-16T19:45:56,591 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. 
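At this point CreateTableProcedure pid=81 has written the filesystem layout for the two regions of testtb-testConsecutiveExports; each region is instantiated once by the RegionOpenAndInit pool and immediately closed again before assignment. A hedged sketch of a client-side createTable call that would yield the descriptor and split shown in the log (class and variable names are illustrative; only the MOB-related attributes are reproduced):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testConsecutiveExports");
      // MOB_THRESHOLD=0 pushes every cell of 'cf' down the MOB write path,
      // matching IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1' above.
      TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)
              .build());
      // One split key "1" yields the two regions seen above: ['', '1') and ['1', '').
      byte[][] splits = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(tdb.build(), splits);
    }
  }
}
```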
2024-12-16T19:45:56,591 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1635): Region close journal for 66fba5611c85f6777b8785e0032e7521: 2024-12-16T19:45:56,606 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T19:45:56,606 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734378356606"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378356606"}]},"ts":"1734378356606"} 2024-12-16T19:45:56,606 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734378356606"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378356606"}]},"ts":"1734378356606"} 2024-12-16T19:45:56,613 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T19:45:56,615 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T19:45:56,615 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378356615"}]},"ts":"1734378356615"} 2024-12-16T19:45:56,617 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-16T19:45:56,638 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {7a6e4bf70377=0} racks are {/default-rack=0} 2024-12-16T19:45:56,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-16T19:45:56,642 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T19:45:56,642 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T19:45:56,643 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T19:45:56,643 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T19:45:56,643 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T19:45:56,643 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T19:45:56,643 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T19:45:56,643 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=66fba5611c85f6777b8785e0032e7521, ASSIGN}, {pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a80c77746132b3c3234c95a6bb030b86, ASSIGN}] 2024-12-16T19:45:56,646 INFO [PEWorker-5 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=66fba5611c85f6777b8785e0032e7521, ASSIGN 2024-12-16T19:45:56,646 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a80c77746132b3c3234c95a6bb030b86, ASSIGN 2024-12-16T19:45:56,649 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=66fba5611c85f6777b8785e0032e7521, ASSIGN; state=OFFLINE, location=7a6e4bf70377,45217,1734378236605; forceNewPlan=false, retain=false 2024-12-16T19:45:56,652 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a80c77746132b3c3234c95a6bb030b86, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42741,1734378236851; forceNewPlan=false, retain=false 2024-12-16T19:45:56,800 INFO [7a6e4bf70377:45883 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T19:45:56,801 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=66fba5611c85f6777b8785e0032e7521, regionState=OPENING, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:45:56,801 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=a80c77746132b3c3234c95a6bb030b86, regionState=OPENING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:45:56,802 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; OpenRegionProcedure a80c77746132b3c3234c95a6bb030b86, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:45:56,803 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=85, ppid=82, state=RUNNABLE; OpenRegionProcedure 66fba5611c85f6777b8785e0032e7521, server=7a6e4bf70377,45217,1734378236605}] 2024-12-16T19:45:56,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-16T19:45:56,954 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:45:56,954 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:45:56,957 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. 2024-12-16T19:45:56,957 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. 
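The two ASSIGN subprocedures (pid=82 and pid=83) each take an exclusive lock, pick a target via the balancer, and spawn an OpenRegionProcedure on 7a6e4bf70377,45217 and 7a6e4bf70377,42741 respectively. A sketch of how a client could wait for the assignment to settle and list where the regions landed (illustrative class name; the calls are standard HBase 2.x client methods):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowAssignments {
  public static void main(String[] args) throws Exception {
    try (Connection conn =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testConsecutiveExports");
      // Poll until both OpenRegionProcedures have reported their regions OPEN.
      while (!admin.isTableAvailable(tn)) {
        Thread.sleep(100);
      }
      try (RegionLocator locator = conn.getRegionLocator(tn)) {
        for (HRegionLocation loc : locator.getAllRegionLocations()) {
          System.out.println(loc.getRegion().getEncodedName()
              + " -> " + loc.getServerName());
        }
      }
    }
  }
}
```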
2024-12-16T19:45:56,957 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7285): Opening region: {ENCODED => 66fba5611c85f6777b8785e0032e7521, NAME => 'testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T19:45:56,957 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7285): Opening region: {ENCODED => a80c77746132b3c3234c95a6bb030b86, NAME => 'testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T19:45:56,957 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. service=AccessControlService 2024-12-16T19:45:56,957 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. service=AccessControlService 2024-12-16T19:45:56,957 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:45:56,957 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:45:56,958 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 66fba5611c85f6777b8785e0032e7521 2024-12-16T19:45:56,958 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:45:56,958 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:45:56,958 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:45:56,958 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7327): checking encryption for 66fba5611c85f6777b8785e0032e7521 2024-12-16T19:45:56,958 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7327): checking encryption for a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:45:56,958 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7330): checking 
classloading for 66fba5611c85f6777b8785e0032e7521 2024-12-16T19:45:56,958 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7330): checking classloading for a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:45:56,959 INFO [StoreOpener-66fba5611c85f6777b8785e0032e7521-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 66fba5611c85f6777b8785e0032e7521 2024-12-16T19:45:56,959 INFO [StoreOpener-a80c77746132b3c3234c95a6bb030b86-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:45:56,961 INFO [StoreOpener-66fba5611c85f6777b8785e0032e7521-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 66fba5611c85f6777b8785e0032e7521 columnFamilyName cf 2024-12-16T19:45:56,961 INFO [StoreOpener-a80c77746132b3c3234c95a6bb030b86-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a80c77746132b3c3234c95a6bb030b86 columnFamilyName cf 2024-12-16T19:45:56,962 DEBUG [StoreOpener-a80c77746132b3c3234c95a6bb030b86-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:45:56,962 DEBUG [StoreOpener-66fba5611c85f6777b8785e0032e7521-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:45:56,962 INFO [StoreOpener-a80c77746132b3c3234c95a6bb030b86-1 {}] regionserver.HStore(327): Store=a80c77746132b3c3234c95a6bb030b86/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:45:56,962 INFO [StoreOpener-66fba5611c85f6777b8785e0032e7521-1 {}] regionserver.HStore(327): Store=66fba5611c85f6777b8785e0032e7521/cf, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:45:56,963 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:45:56,964 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:45:56,965 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521 2024-12-16T19:45:56,965 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1085): writing seq id for a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:45:56,966 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521 2024-12-16T19:45:56,967 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1085): writing seq id for 66fba5611c85f6777b8785e0032e7521 2024-12-16T19:45:56,967 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:45:56,968 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1102): Opened a80c77746132b3c3234c95a6bb030b86; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74186600, jitterRate=0.10546648502349854}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:45:56,968 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1001): Region open journal for a80c77746132b3c3234c95a6bb030b86: 2024-12-16T19:45:56,969 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:45:56,969 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86., pid=84, masterSystemTime=1734378356954 2024-12-16T19:45:56,969 INFO 
[RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1102): Opened 66fba5611c85f6777b8785e0032e7521; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64505046, jitterRate=-0.03879991173744202}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:45:56,970 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1001): Region open journal for 66fba5611c85f6777b8785e0032e7521: 2024-12-16T19:45:56,970 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521., pid=85, masterSystemTime=1734378356954 2024-12-16T19:45:56,970 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. 2024-12-16T19:45:56,970 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. 2024-12-16T19:45:56,971 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=a80c77746132b3c3234c95a6bb030b86, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:45:56,971 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. 2024-12-16T19:45:56,971 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. 
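The "Opened ... next sequenceid=2" lines also print the effective split policy, and the two desiredMaxFileSize values appear consistent with a 64 MB base max-file-size scaled by the logged jitterRate, i.e. base + base * jitterRate. The base value and the rounding are assumptions, so the snippet below is only an arithmetic sanity check against the logged numbers, not the policy's actual implementation:

```java
public class JitterCheck {
  public static void main(String[] args) {
    long base = 64L * 1024 * 1024; // assumed hbase.hregion.max.filesize for this test: 64 MB
    double[] jitterRates = { 0.10546648502349854, -0.03879991173744202 };
    long[] logged = { 74186600L, 64505046L };
    for (int i = 0; i < jitterRates.length; i++) {
      // desiredMaxFileSize = base + base * jitterRate, per the values in the log
      long computed = base + (long) (base * jitterRates[i]);
      System.out.printf("computed=%d logged=%d%n", computed, logged[i]);
    }
  }
}
```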
2024-12-16T19:45:56,972 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=66fba5611c85f6777b8785e0032e7521, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:45:56,973 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-16T19:45:56,974 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=85, resume processing ppid=82 2024-12-16T19:45:56,974 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; OpenRegionProcedure a80c77746132b3c3234c95a6bb030b86, server=7a6e4bf70377,42741,1734378236851 in 170 msec 2024-12-16T19:45:56,974 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, ppid=82, state=SUCCESS; OpenRegionProcedure 66fba5611c85f6777b8785e0032e7521, server=7a6e4bf70377,45217,1734378236605 in 170 msec 2024-12-16T19:45:56,974 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a80c77746132b3c3234c95a6bb030b86, ASSIGN in 330 msec 2024-12-16T19:45:56,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-16T19:45:56,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=66fba5611c85f6777b8785e0032e7521, ASSIGN in 331 msec 2024-12-16T19:45:56,975 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T19:45:56,976 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378356975"}]},"ts":"1734378356975"} 2024-12-16T19:45:56,977 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-16T19:45:56,985 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T19:45:56,985 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-16T19:45:56,987 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-16T19:45:56,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:56,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:56,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:56,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:45:57,057 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-16T19:45:57,057 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-16T19:45:57,057 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-16T19:45:57,057 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-16T19:45:57,058 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; CreateTableProcedure table=testtb-testConsecutiveExports in 530 msec 2024-12-16T19:45:57,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-16T19:45:57,142 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports, procId: 81 completed 2024-12-16T19:45:57,144 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testConsecutiveExports 2024-12-16T19:45:57,144 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. 2024-12-16T19:45:57,144 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:45:57,161 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-16T19:45:57,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378357161 (current time:1734378357161). 
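With the ACL for the new table propagated, the master accepts a FLUSH-type snapshot request and will register it as SnapshotProcedure pid=86. A sketch of the corresponding client call, assuming the default cluster configuration (class name is illustrative; snapshot and table names are from the log):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testConsecutiveExports");
      // type=FLUSH in the request corresponds to SnapshotType.FLUSH here;
      // the call blocks until the master's SnapshotProcedure completes.
      admin.snapshot("emptySnaptb0-testConsecutiveExports", tn, SnapshotType.FLUSH);
    }
  }
}
```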
2024-12-16T19:45:57,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:45:57,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-16T19:45:57,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:45:57,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x146e33eb to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1086f343 2024-12-16T19:45:57,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41dcb03, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:45:57,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:45:57,179 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54018, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:45:57,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x146e33eb to 127.0.0.1:62515 2024-12-16T19:45:57,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:45:57,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x29fce4e4 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2335834 2024-12-16T19:45:57,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@681cdea2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:45:57,195 DEBUG [hconnection-0x7ac968e5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:45:57,196 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54022, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:45:57,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:45:57,199 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34002, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:45:57,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x29fce4e4 to 127.0.0.1:62515 2024-12-16T19:45:57,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:45:57,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-16T19:45:57,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T19:45:57,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=86, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-16T19:45:57,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86 2024-12-16T19:45:57,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-16T19:45:57,206 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:45:57,207 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:45:57,210 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:45:57,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742038_1214 (size=161) 2024-12-16T19:45:57,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742038_1214 (size=161) 2024-12-16T19:45:57,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742038_1214 (size=161) 2024-12-16T19:45:57,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-16T19:45:57,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-16T19:45:57,632 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 
execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:45:57,632 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 66fba5611c85f6777b8785e0032e7521}, {pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure a80c77746132b3c3234c95a6bb030b86}] 2024-12-16T19:45:57,633 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:45:57,633 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 66fba5611c85f6777b8785e0032e7521 2024-12-16T19:45:57,784 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:45:57,784 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:45:57,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42741 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=88 2024-12-16T19:45:57,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45217 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=87 2024-12-16T19:45:57,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. 2024-12-16T19:45:57,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. 2024-12-16T19:45:57,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for a80c77746132b3c3234c95a6bb030b86: 2024-12-16T19:45:57,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.HRegion(2538): Flush status journal for 66fba5611c85f6777b8785e0032e7521: 2024-12-16T19:45:57,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. for emptySnaptb0-testConsecutiveExports completed. 2024-12-16T19:45:57,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. for emptySnaptb0-testConsecutiveExports completed. 2024-12-16T19:45:57,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-16T19:45:57,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:45:57,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-16T19:45:57,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T19:45:57,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:45:57,785 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T19:45:57,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742040_1216 (size=68) 2024-12-16T19:45:57,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742039_1215 (size=68) 2024-12-16T19:45:57,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742040_1216 (size=68) 2024-12-16T19:45:57,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742040_1216 (size=68) 2024-12-16T19:45:57,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742039_1215 (size=68) 2024-12-16T19:45:57,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. 2024-12-16T19:45:57,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-16T19:45:57,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742039_1215 (size=68) 2024-12-16T19:45:57,796 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. 
2024-12-16T19:45:57,797 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=87 2024-12-16T19:45:57,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-16T19:45:57,797 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:45:57,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=87 2024-12-16T19:45:57,797 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 66fba5611c85f6777b8785e0032e7521 2024-12-16T19:45:57,797 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:45:57,797 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 66fba5611c85f6777b8785e0032e7521 2024-12-16T19:45:57,798 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, ppid=86, state=SUCCESS; SnapshotRegionProcedure 66fba5611c85f6777b8785e0032e7521 in 166 msec 2024-12-16T19:45:57,799 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=86 2024-12-16T19:45:57,799 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=86, state=SUCCESS; SnapshotRegionProcedure a80c77746132b3c3234c95a6bb030b86 in 166 msec 2024-12-16T19:45:57,799 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:45:57,802 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:45:57,804 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
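
The SNAPSHOT_SNAPSHOT_MOB_REGION state and the MobRegionSnapshotPool entries above (and the mobdir flush paths further down) indicate that the 'cf' family of testtb-testConsecutiveExports is MOB-enabled. A hedged sketch of how such a family is typically declared with the HBase 2.x descriptor builders; the MOB threshold value and the class name are illustrative, not taken from the test:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableSketch {
      public static void main(String[] args) throws Exception {
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)   // cells above the threshold are written as MOB files under mobdir/
            .setMobThreshold(0L)   // illustrative threshold; the test's actual setting is not shown in this log
            .build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testConsecutiveExports"))
              .setColumnFamily(cf)
              .build());
        }
      }
    }
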
2024-12-16T19:45:57,804 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:45:57,804 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:45:57,804 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-16T19:45:57,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-16T19:45:57,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742041_1217 (size=60) 2024-12-16T19:45:57,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742041_1217 (size=60) 2024-12-16T19:45:57,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742041_1217 (size=60) 2024-12-16T19:45:58,213 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:45:58,213 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-16T19:45:58,214 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-16T19:45:58,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742042_1218 (size=641) 2024-12-16T19:45:58,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742042_1218 (size=641) 2024-12-16T19:45:58,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742042_1218 (size=641) 2024-12-16T19:45:58,236 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:45:58,242 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:45:58,243 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-16T19:45:58,245 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:45:58,245 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86 2024-12-16T19:45:58,247 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 1.0430 sec 2024-12-16T19:45:58,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-16T19:45:58,310 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports, procId: 86 completed 2024-12-16T19:45:58,314 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:45:58,317 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42741 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:45:58,321 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testConsecutiveExports 2024-12-16T19:45:58,322 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. 2024-12-16T19:45:58,322 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:45:58,337 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-16T19:45:58,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378358337 (current time:1734378358337). 
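
Between the two snapshots, the test loads rows into both regions with the WAL disabled, which is what produces the "Data may be lost in the event of a crash" warnings above. A hedged sketch of such a write; the row key and value are illustrative, while the table name, family ('cf') and qualifier ('q') match the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalWriteSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testConsecutiveExports"))) {
          Put put = new Put(Bytes.toBytes("row-0"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0"));
          // SKIP_WAL trades durability for write latency; the region server logs the
          // "writing data ... with WAL disabled" warning seen above for such writes.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }
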
2024-12-16T19:45:58,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:45:58,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-16T19:45:58,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:45:58,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a3f9db4 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@682c15a0 2024-12-16T19:45:58,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2dfb0dd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:45:58,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:45:58,371 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54026, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:45:58,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a3f9db4 to 127.0.0.1:62515 2024-12-16T19:45:58,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:45:58,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x65dbc960 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d52416e 2024-12-16T19:45:58,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18324499, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:45:58,395 DEBUG [hconnection-0x7283981d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:45:58,396 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54036, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:45:58,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:45:58,399 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34004, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:45:58,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 
0x65dbc960 to 127.0.0.1:62515 2024-12-16T19:45:58,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:45:58,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-16T19:45:58,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T19:45:58,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-16T19:45:58,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89 2024-12-16T19:45:58,408 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:45:58,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-16T19:45:58,410 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:45:58,416 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:45:58,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742043_1219 (size=156) 2024-12-16T19:45:58,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742043_1219 (size=156) 2024-12-16T19:45:58,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742043_1219 (size=156) 2024-12-16T19:45:58,434 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:45:58,434 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 66fba5611c85f6777b8785e0032e7521}, {pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure a80c77746132b3c3234c95a6bb030b86}] 2024-12-16T19:45:58,436 INFO [PEWorker-1 
{}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:45:58,436 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 66fba5611c85f6777b8785e0032e7521 2024-12-16T19:45:58,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-16T19:45:58,588 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:45:58,588 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:45:58,589 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45217 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=90 2024-12-16T19:45:58,589 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42741 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=91 2024-12-16T19:45:58,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. 2024-12-16T19:45:58,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. 
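
Because this snapshot is type=FLUSH, each SnapshotRegionCallable flushes the region's memstore before taking references (the "Flushing ... 1/1 column families" entries that follow). Here the flush is driven by the snapshot procedure itself; outside of a snapshot, an equivalent flush can be requested through the Admin API. A minimal, hedged sketch (class name illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the region servers hosting this table to flush their memstores to HFiles.
          admin.flush(TableName.valueOf("testtb-testConsecutiveExports"));
        }
      }
    }
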
2024-12-16T19:45:58,589 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2837): Flushing a80c77746132b3c3234c95a6bb030b86 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-16T19:45:58,589 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 66fba5611c85f6777b8785e0032e7521 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-16T19:45:58,613 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241216fc9c7ed2dc144dc3acfa3c676715876b_a80c77746132b3c3234c95a6bb030b86 is 71, key is 104a35635cf39a315f164c311e0dc922/cf:q/1734378358317/Put/seqid=0 2024-12-16T19:45:58,619 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216cfe544581e014a35a5a909fe64bea940_66fba5611c85f6777b8785e0032e7521 is 71, key is 02bdd502b59da1f48e8e2efec8f37c19/cf:q/1734378358313/Put/seqid=0 2024-12-16T19:45:58,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742044_1220 (size=7961) 2024-12-16T19:45:58,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742044_1220 (size=7961) 2024-12-16T19:45:58,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742044_1220 (size=7961) 2024-12-16T19:45:58,631 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:45:58,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742045_1221 (size=5312) 2024-12-16T19:45:58,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742045_1221 (size=5312) 2024-12-16T19:45:58,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742045_1221 (size=5312) 2024-12-16T19:45:58,638 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:45:58,638 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241216fc9c7ed2dc144dc3acfa3c676715876b_a80c77746132b3c3234c95a6bb030b86 to 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241216fc9c7ed2dc144dc3acfa3c676715876b_a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:45:58,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86/.tmp/cf/ec3c027bea8c4f12a03c4264584a9c1f, store: [table=testtb-testConsecutiveExports family=cf region=a80c77746132b3c3234c95a6bb030b86] 2024-12-16T19:45:58,640 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86/.tmp/cf/ec3c027bea8c4f12a03c4264584a9c1f is 206, key is 190197dacb0e441fb1486c8e8446cf1c9/cf:q/1734378358317/Put/seqid=0 2024-12-16T19:45:58,644 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216cfe544581e014a35a5a909fe64bea940_66fba5611c85f6777b8785e0032e7521 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241216cfe544581e014a35a5a909fe64bea940_66fba5611c85f6777b8785e0032e7521 2024-12-16T19:45:58,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521/.tmp/cf/928454092f0945a7ba5b23d09071c0b4, store: [table=testtb-testConsecutiveExports family=cf region=66fba5611c85f6777b8785e0032e7521] 2024-12-16T19:45:58,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521/.tmp/cf/928454092f0945a7ba5b23d09071c0b4 is 206, key is 0095f3acacdb41b560914b29cbb1c5b42/cf:q/1734378358313/Put/seqid=0 2024-12-16T19:45:58,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742046_1222 (size=14247) 2024-12-16T19:45:58,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742046_1222 (size=14247) 2024-12-16T19:45:58,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742046_1222 (size=14247) 2024-12-16T19:45:58,653 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86/.tmp/cf/ec3c027bea8c4f12a03c4264584a9c1f 2024-12-16T19:45:58,662 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86/.tmp/cf/ec3c027bea8c4f12a03c4264584a9c1f as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86/cf/ec3c027bea8c4f12a03c4264584a9c1f 2024-12-16T19:45:58,669 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86/cf/ec3c027bea8c4f12a03c4264584a9c1f, entries=44, sequenceid=6, filesize=13.9 K 2024-12-16T19:45:58,672 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(3040): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for a80c77746132b3c3234c95a6bb030b86 in 83ms, sequenceid=6, compaction requested=false 2024-12-16T19:45:58,672 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-16T19:45:58,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2538): Flush status journal for a80c77746132b3c3234c95a6bb030b86: 2024-12-16T19:45:58,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. for snaptb0-testConsecutiveExports completed. 2024-12-16T19:45:58,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-16T19:45:58,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:45:58,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86/cf/ec3c027bea8c4f12a03c4264584a9c1f] hfiles 2024-12-16T19:45:58,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86/cf/ec3c027bea8c4f12a03c4264584a9c1f for snapshot=snaptb0-testConsecutiveExports 2024-12-16T19:45:58,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742047_1223 (size=6512) 2024-12-16T19:45:58,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742047_1223 (size=6512) 2024-12-16T19:45:58,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742047_1223 (size=6512) 2024-12-16T19:45:58,687 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=400, hasBloomFilter=true, into tmp file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521/.tmp/cf/928454092f0945a7ba5b23d09071c0b4 2024-12-16T19:45:58,696 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521/.tmp/cf/928454092f0945a7ba5b23d09071c0b4 as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521/cf/928454092f0945a7ba5b23d09071c0b4 2024-12-16T19:45:58,703 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521/cf/928454092f0945a7ba5b23d09071c0b4, entries=6, sequenceid=6, filesize=6.4 K 2024-12-16T19:45:58,704 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for 66fba5611c85f6777b8785e0032e7521 in 114ms, sequenceid=6, compaction requested=false 2024-12-16T19:45:58,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 
66fba5611c85f6777b8785e0032e7521: 2024-12-16T19:45:58,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. for snaptb0-testConsecutiveExports completed. 2024-12-16T19:45:58,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-16T19:45:58,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:45:58,705 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521/cf/928454092f0945a7ba5b23d09071c0b4] hfiles 2024-12-16T19:45:58,705 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521/cf/928454092f0945a7ba5b23d09071c0b4 for snapshot=snaptb0-testConsecutiveExports 2024-12-16T19:45:58,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-16T19:45:58,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742048_1224 (size=107) 2024-12-16T19:45:58,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742048_1224 (size=107) 2024-12-16T19:45:58,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742048_1224 (size=107) 2024-12-16T19:45:58,736 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. 
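
Once snaptb0-testConsecutiveExports is consolidated and verified (the entries that follow), TestExportSnapshot exports it to a local filesystem path via ExportSnapshot (the "Local export destination path" and "Copy Snapshot Manifest" entries a few entries further down). A hedged sketch of an equivalent invocation; the destination directory and class name are illustrative, and the option names follow the ExportSnapshot tool's documented usage:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Roughly equivalent CLI form:
        //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //     -snapshot snaptb0-testConsecutiveExports -copy-to file:///tmp/local-export
        int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
            new String[] { "-snapshot", "snaptb0-testConsecutiveExports",
                           "-copy-to", "file:///tmp/local-export" });
        System.exit(rc);
      }
    }
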
2024-12-16T19:45:58,736 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=91 2024-12-16T19:45:58,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=91 2024-12-16T19:45:58,736 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:45:58,737 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:45:58,739 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=89, state=SUCCESS; SnapshotRegionProcedure a80c77746132b3c3234c95a6bb030b86 in 304 msec 2024-12-16T19:45:58,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742049_1225 (size=107) 2024-12-16T19:45:58,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742049_1225 (size=107) 2024-12-16T19:45:58,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742049_1225 (size=107) 2024-12-16T19:45:58,760 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. 2024-12-16T19:45:58,760 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-16T19:45:58,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-16T19:45:58,761 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 66fba5611c85f6777b8785e0032e7521 2024-12-16T19:45:58,761 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 66fba5611c85f6777b8785e0032e7521 2024-12-16T19:45:58,763 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-16T19:45:58,763 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:45:58,763 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; SnapshotRegionProcedure 66fba5611c85f6777b8785e0032e7521 in 327 msec 2024-12-16T19:45:58,763 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:45:58,765 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-16T19:45:58,765 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:45:58,765 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:45:58,766 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241216fc9c7ed2dc144dc3acfa3c676715876b_a80c77746132b3c3234c95a6bb030b86, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241216cfe544581e014a35a5a909fe64bea940_66fba5611c85f6777b8785e0032e7521] hfiles 2024-12-16T19:45:58,766 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241216fc9c7ed2dc144dc3acfa3c676715876b_a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:45:58,766 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241216cfe544581e014a35a5a909fe64bea940_66fba5611c85f6777b8785e0032e7521 2024-12-16T19:45:58,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742050_1226 (size=291) 2024-12-16T19:45:58,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742050_1226 (size=291) 2024-12-16T19:45:58,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742050_1226 (size=291) 2024-12-16T19:45:59,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-16T19:45:59,185 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:45:59,185 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-16T19:45:59,186 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-16T19:45:59,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39241 is added to blk_1073742051_1227 (size=951) 2024-12-16T19:45:59,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742051_1227 (size=951) 2024-12-16T19:45:59,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742051_1227 (size=951) 2024-12-16T19:45:59,223 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:45:59,246 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:45:59,247 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-16T19:45:59,249 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:45:59,250 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89 2024-12-16T19:45:59,252 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 847 msec 2024-12-16T19:45:59,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-16T19:45:59,514 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports, procId: 89 completed 2024-12-16T19:45:59,514 INFO [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514 2024-12-16T19:45:59,514 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514, srcFsUri=hdfs://localhost:38709, 
srcDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:45:59,558 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:38709, inputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:45:59,558 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@11dd3e06, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-16T19:45:59,560 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T19:45:59,591 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-16T19:45:59,621 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T19:45:59,621 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T19:45:59,622 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T19:45:59,622 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:00,524 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0003_000001 (auth:SIMPLE) from 127.0.0.1:51732 2024-12-16T19:46:00,553 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_0/usercache/jenkins/appcache/application_1734378249643_0003/container_1734378249643_0003_01_000001/launch_container.sh] 2024-12-16T19:46:00,553 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_0/usercache/jenkins/appcache/application_1734378249643_0003/container_1734378249643_0003_01_000001/container_tokens] 2024-12-16T19:46:00,553 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_0/usercache/jenkins/appcache/application_1734378249643_0003/container_1734378249643_0003_01_000001/sysfs] 2024-12-16T19:46:00,730 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-10564565383794550156.jar 2024-12-16T19:46:00,730 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:00,731 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:00,816 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-14896635403397094442.jar 2024-12-16T19:46:00,817 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:00,817 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:00,818 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:00,818 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:00,819 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:00,819 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For 
class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:00,820 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T19:46:00,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T19:46:00,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T19:46:00,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T19:46:00,831 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-16T19:46:00,831 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T19:46:00,831 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T19:46:00,831 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T19:46:00,832 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T19:46:00,832 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T19:46:00,833 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T19:46:00,833 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T19:46:00,833 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:46:00,834 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:46:00,834 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:46:00,834 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:46:00,834 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:46:00,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:46:00,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:46:01,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742052_1228 (size=127628) 2024-12-16T19:46:01,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742052_1228 (size=127628) 2024-12-16T19:46:01,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742052_1228 (size=127628) 2024-12-16T19:46:01,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742053_1229 (size=2172137) 2024-12-16T19:46:01,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742053_1229 (size=2172137) 2024-12-16T19:46:01,240 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742053_1229 (size=2172137) 2024-12-16T19:46:01,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742054_1230 (size=213228) 2024-12-16T19:46:01,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742054_1230 (size=213228) 2024-12-16T19:46:01,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742054_1230 (size=213228) 2024-12-16T19:46:01,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742055_1231 (size=1877034) 2024-12-16T19:46:01,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742055_1231 (size=1877034) 2024-12-16T19:46:01,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742055_1231 (size=1877034) 2024-12-16T19:46:01,812 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T19:46:01,857 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region a80c77746132b3c3234c95a6bb030b86 changed from -1.0 to 0.0, refreshing cache 2024-12-16T19:46:01,858 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 66fba5611c85f6777b8785e0032e7521 changed from -1.0 to 0.0, refreshing cache 2024-12-16T19:46:01,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742056_1232 (size=533455) 2024-12-16T19:46:01,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742056_1232 (size=533455) 2024-12-16T19:46:01,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742056_1232 (size=533455) 2024-12-16T19:46:01,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742057_1233 (size=7280644) 2024-12-16T19:46:01,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742057_1233 (size=7280644) 2024-12-16T19:46:01,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742057_1233 (size=7280644) 2024-12-16T19:46:02,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742058_1234 (size=4188619) 2024-12-16T19:46:02,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742058_1234 (size=4188619) 2024-12-16T19:46:02,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742058_1234 (size=4188619) 2024-12-16T19:46:02,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742059_1235 
(size=20406) 2024-12-16T19:46:02,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742059_1235 (size=20406) 2024-12-16T19:46:02,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742059_1235 (size=20406) 2024-12-16T19:46:02,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742060_1236 (size=75495) 2024-12-16T19:46:02,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742060_1236 (size=75495) 2024-12-16T19:46:02,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742060_1236 (size=75495) 2024-12-16T19:46:02,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742061_1237 (size=45609) 2024-12-16T19:46:02,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742061_1237 (size=45609) 2024-12-16T19:46:02,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742061_1237 (size=45609) 2024-12-16T19:46:02,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742062_1238 (size=110084) 2024-12-16T19:46:02,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742062_1238 (size=110084) 2024-12-16T19:46:02,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742062_1238 (size=110084) 2024-12-16T19:46:02,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742063_1239 (size=1323991) 2024-12-16T19:46:02,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742063_1239 (size=1323991) 2024-12-16T19:46:02,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742063_1239 (size=1323991) 2024-12-16T19:46:02,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742064_1240 (size=23076) 2024-12-16T19:46:02,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742064_1240 (size=23076) 2024-12-16T19:46:02,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742064_1240 (size=23076) 2024-12-16T19:46:02,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742065_1241 (size=126803) 2024-12-16T19:46:02,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742065_1241 (size=126803) 2024-12-16T19:46:02,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742065_1241 
(size=126803) 2024-12-16T19:46:02,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742066_1242 (size=322274) 2024-12-16T19:46:02,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742066_1242 (size=322274) 2024-12-16T19:46:02,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742066_1242 (size=322274) 2024-12-16T19:46:02,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742067_1243 (size=1832290) 2024-12-16T19:46:02,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742067_1243 (size=1832290) 2024-12-16T19:46:02,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742067_1243 (size=1832290) 2024-12-16T19:46:02,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742068_1244 (size=6350922) 2024-12-16T19:46:02,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742068_1244 (size=6350922) 2024-12-16T19:46:02,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742068_1244 (size=6350922) 2024-12-16T19:46:02,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742069_1245 (size=451756) 2024-12-16T19:46:02,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742069_1245 (size=451756) 2024-12-16T19:46:02,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742069_1245 (size=451756) 2024-12-16T19:46:02,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742070_1246 (size=30081) 2024-12-16T19:46:02,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742070_1246 (size=30081) 2024-12-16T19:46:02,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742070_1246 (size=30081) 2024-12-16T19:46:02,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742071_1247 (size=53616) 2024-12-16T19:46:02,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742071_1247 (size=53616) 2024-12-16T19:46:02,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742071_1247 (size=53616) 2024-12-16T19:46:02,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742072_1248 (size=29229) 2024-12-16T19:46:02,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to 
blk_1073742072_1248 (size=29229) 2024-12-16T19:46:02,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742072_1248 (size=29229) 2024-12-16T19:46:02,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742073_1249 (size=169089) 2024-12-16T19:46:02,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742073_1249 (size=169089) 2024-12-16T19:46:02,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742073_1249 (size=169089) 2024-12-16T19:46:02,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742074_1250 (size=5175431) 2024-12-16T19:46:02,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742074_1250 (size=5175431) 2024-12-16T19:46:02,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742074_1250 (size=5175431) 2024-12-16T19:46:02,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742075_1251 (size=136454) 2024-12-16T19:46:02,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742075_1251 (size=136454) 2024-12-16T19:46:02,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742075_1251 (size=136454) 2024-12-16T19:46:02,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742076_1252 (size=907467) 2024-12-16T19:46:02,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742076_1252 (size=907467) 2024-12-16T19:46:02,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742076_1252 (size=907467) 2024-12-16T19:46:02,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742077_1253 (size=3317408) 2024-12-16T19:46:02,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742077_1253 (size=3317408) 2024-12-16T19:46:02,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742077_1253 (size=3317408) 2024-12-16T19:46:03,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742078_1254 (size=503880) 2024-12-16T19:46:03,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742078_1254 (size=503880) 2024-12-16T19:46:03,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742078_1254 (size=503880) 2024-12-16T19:46:03,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is 
added to blk_1073742079_1255 (size=4695811) 2024-12-16T19:46:03,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742079_1255 (size=4695811) 2024-12-16T19:46:03,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742079_1255 (size=4695811) 2024-12-16T19:46:03,557 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-16T19:46:03,559 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-16T19:46:03,562 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=33.2 K 2024-12-16T19:46:03,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742080_1256 (size=714) 2024-12-16T19:46:03,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742080_1256 (size=714) 2024-12-16T19:46:03,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742080_1256 (size=714) 2024-12-16T19:46:03,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742081_1257 (size=15) 2024-12-16T19:46:03,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742081_1257 (size=15) 2024-12-16T19:46:03,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742081_1257 (size=15) 2024-12-16T19:46:03,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742082_1258 (size=304930) 2024-12-16T19:46:03,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742082_1258 (size=304930) 2024-12-16T19:46:03,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742082_1258 (size=304930) 2024-12-16T19:46:03,622 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T19:46:03,622 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-16T19:46:03,909 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0004_000001 (auth:SIMPLE) from 127.0.0.1:39322 2024-12-16T19:46:06,039 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-16T19:46:06,039 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-16T19:46:06,040 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-16T19:46:10,869 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0004_000001 (auth:SIMPLE) from 127.0.0.1:36430 2024-12-16T19:46:11,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742083_1259 (size=350604) 2024-12-16T19:46:11,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742083_1259 (size=350604) 2024-12-16T19:46:11,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742083_1259 (size=350604) 2024-12-16T19:46:11,543 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T19:46:13,375 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0004_000001 (auth:SIMPLE) from 127.0.0.1:45478 2024-12-16T19:46:19,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742084_1260 (size=17451) 2024-12-16T19:46:19,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742084_1260 (size=17451) 2024-12-16T19:46:19,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742084_1260 (size=17451) 2024-12-16T19:46:19,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742085_1261 (size=462) 2024-12-16T19:46:19,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742085_1261 (size=462) 2024-12-16T19:46:19,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742085_1261 (size=462) 2024-12-16T19:46:19,394 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_3/usercache/jenkins/appcache/application_1734378249643_0004/container_1734378249643_0004_01_000002/launch_container.sh] 2024-12-16T19:46:19,394 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_3/usercache/jenkins/appcache/application_1734378249643_0004/container_1734378249643_0004_01_000002/container_tokens] 2024-12-16T19:46:19,394 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_3/usercache/jenkins/appcache/application_1734378249643_0004/container_1734378249643_0004_01_000002/sysfs] 2024-12-16T19:46:19,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742086_1262 (size=17451) 2024-12-16T19:46:19,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742086_1262 (size=17451) 2024-12-16T19:46:19,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742086_1262 (size=17451) 2024-12-16T19:46:19,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742087_1263 (size=350604) 2024-12-16T19:46:19,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742087_1263 (size=350604) 2024-12-16T19:46:19,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742087_1263 (size=350604) 2024-12-16T19:46:19,453 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0004_000001 (auth:SIMPLE) from 127.0.0.1:43670 2024-12-16T19:46:20,850 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-16T19:46:20,851 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
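[Editor's note: the surrounding entries record the snapshot "snaptb0-testConsecutiveExports" being taken via the master SnapshotProcedure and then copied to a local filesystem by the ExportSnapshot MapReduce tool. A minimal sketch of how that snapshot-then-export flow is normally driven is shown below; it assumes a standard HBase 2.x client on the classpath, the table and snapshot names are taken from the log, and the local output path is a placeholder rather than the Jenkins workspace path above.]

// Illustrative sketch only; not the test code from this log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotExportSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Takes a FLUSH-type snapshot of the enabled table, matching
      // "ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH"
      // in the procedure entries earlier in this log.
      admin.snapshot("snaptb0-testConsecutiveExports",
          TableName.valueOf("testtb-testConsecutiveExports"));
    }
    // The export itself is driven by the ExportSnapshot tool, e.g. from the shell:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testConsecutiveExports \
    //     -copy-to file:///path/to/local-export
    // which produces the "Copy Snapshot Manifest ... Export Completed" entries recorded here.
  }
}

[The test whose output follows exercises "consecutive" exports: the log shows a second ExportSnapshot run starting at 19:46:20,875 that reuses the same local target directory (local-export-1734378359514) as the first run.]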
2024-12-16T19:46:20,871 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports 2024-12-16T19:46:20,871 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-16T19:46:20,871 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-16T19:46:20,872 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-16T19:46:20,873 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-16T19:46:20,873 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-16T19:46:20,873 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@11dd3e06 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-16T19:46:20,873 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-16T19:46:20,873 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-16T19:46:20,875 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514, srcFsUri=hdfs://localhost:38709, srcDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:46:20,919 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:38709, inputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:46:20,919 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@11dd3e06, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-16T19:46:20,922 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T19:46:20,935 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-16T19:46:20,960 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:20,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:20,961 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:20,962 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:22,125 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-10258070815362045981.jar 2024-12-16T19:46:22,126 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:22,126 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:22,191 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-8071073013442605604.jar 2024-12-16T19:46:22,191 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:22,191 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:22,191 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:22,192 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:22,194 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:22,194 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:22,195 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T19:46:22,195 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T19:46:22,195 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T19:46:22,196 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T19:46:22,196 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-16T19:46:22,196 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T19:46:22,196 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T19:46:22,197 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T19:46:22,197 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T19:46:22,197 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T19:46:22,198 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T19:46:22,198 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T19:46:22,198 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:46:22,199 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:46:22,199 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:46:22,199 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:46:22,200 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:46:22,200 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:46:22,200 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:46:22,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742088_1264 (size=127628) 2024-12-16T19:46:22,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742088_1264 (size=127628) 2024-12-16T19:46:22,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742088_1264 (size=127628) 2024-12-16T19:46:22,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742089_1265 (size=2172137) 2024-12-16T19:46:22,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742089_1265 (size=2172137) 2024-12-16T19:46:22,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742089_1265 (size=2172137) 2024-12-16T19:46:22,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742090_1266 (size=213228) 2024-12-16T19:46:22,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742090_1266 (size=213228) 2024-12-16T19:46:22,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742090_1266 (size=213228) 2024-12-16T19:46:22,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742091_1267 (size=1877034) 2024-12-16T19:46:22,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742091_1267 (size=1877034) 2024-12-16T19:46:22,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742091_1267 (size=1877034) 2024-12-16T19:46:22,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742092_1268 (size=533455) 2024-12-16T19:46:22,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742092_1268 (size=533455) 2024-12-16T19:46:22,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742092_1268 (size=533455) 2024-12-16T19:46:22,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742093_1269 (size=7280644) 2024-12-16T19:46:22,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742093_1269 (size=7280644) 2024-12-16T19:46:22,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742093_1269 (size=7280644) 2024-12-16T19:46:22,565 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742094_1270 (size=451756) 2024-12-16T19:46:22,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742094_1270 (size=451756) 2024-12-16T19:46:22,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742094_1270 (size=451756) 2024-12-16T19:46:22,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742095_1271 (size=4188619) 2024-12-16T19:46:22,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742095_1271 (size=4188619) 2024-12-16T19:46:22,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742095_1271 (size=4188619) 2024-12-16T19:46:22,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742096_1272 (size=20406) 2024-12-16T19:46:22,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742096_1272 (size=20406) 2024-12-16T19:46:22,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742096_1272 (size=20406) 2024-12-16T19:46:23,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742097_1273 (size=75495) 2024-12-16T19:46:23,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742097_1273 (size=75495) 2024-12-16T19:46:23,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742097_1273 (size=75495) 2024-12-16T19:46:23,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742098_1274 (size=45609) 2024-12-16T19:46:23,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742098_1274 (size=45609) 2024-12-16T19:46:23,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742098_1274 (size=45609) 2024-12-16T19:46:23,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742099_1275 (size=6350922) 2024-12-16T19:46:23,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742099_1275 (size=6350922) 2024-12-16T19:46:23,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742099_1275 (size=6350922) 2024-12-16T19:46:23,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742100_1276 (size=110084) 2024-12-16T19:46:23,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742100_1276 (size=110084) 2024-12-16T19:46:23,281 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742100_1276 (size=110084) 2024-12-16T19:46:23,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742101_1277 (size=1323991) 2024-12-16T19:46:23,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742101_1277 (size=1323991) 2024-12-16T19:46:23,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742101_1277 (size=1323991) 2024-12-16T19:46:23,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742102_1278 (size=23076) 2024-12-16T19:46:23,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742102_1278 (size=23076) 2024-12-16T19:46:23,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742102_1278 (size=23076) 2024-12-16T19:46:23,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742103_1279 (size=126803) 2024-12-16T19:46:23,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742103_1279 (size=126803) 2024-12-16T19:46:23,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742103_1279 (size=126803) 2024-12-16T19:46:23,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742104_1280 (size=322274) 2024-12-16T19:46:23,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742104_1280 (size=322274) 2024-12-16T19:46:23,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742104_1280 (size=322274) 2024-12-16T19:46:23,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742105_1281 (size=1832290) 2024-12-16T19:46:23,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742105_1281 (size=1832290) 2024-12-16T19:46:23,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742105_1281 (size=1832290) 2024-12-16T19:46:23,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742106_1282 (size=30081) 2024-12-16T19:46:23,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742106_1282 (size=30081) 2024-12-16T19:46:23,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742106_1282 (size=30081) 2024-12-16T19:46:23,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742107_1283 (size=53616) 2024-12-16T19:46:23,879 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742107_1283 (size=53616) 2024-12-16T19:46:23,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742107_1283 (size=53616) 2024-12-16T19:46:23,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742108_1284 (size=29229) 2024-12-16T19:46:23,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742108_1284 (size=29229) 2024-12-16T19:46:23,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742108_1284 (size=29229) 2024-12-16T19:46:23,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742109_1285 (size=169089) 2024-12-16T19:46:23,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742109_1285 (size=169089) 2024-12-16T19:46:23,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742109_1285 (size=169089) 2024-12-16T19:46:24,255 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details. 2024-12-16T19:46:24,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742110_1286 (size=5175431) 2024-12-16T19:46:24,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742110_1286 (size=5175431) 2024-12-16T19:46:24,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742110_1286 (size=5175431) 2024-12-16T19:46:24,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742111_1287 (size=136454) 2024-12-16T19:46:24,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742111_1287 (size=136454) 2024-12-16T19:46:24,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742111_1287 (size=136454) 2024-12-16T19:46:24,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742112_1288 (size=907467) 2024-12-16T19:46:24,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742112_1288 (size=907467) 2024-12-16T19:46:24,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742112_1288 (size=907467) 2024-12-16T19:46:24,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742113_1289 (size=3317408) 2024-12-16T19:46:24,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is 
added to blk_1073742113_1289 (size=3317408) 2024-12-16T19:46:24,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742113_1289 (size=3317408) 2024-12-16T19:46:24,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742114_1290 (size=503880) 2024-12-16T19:46:24,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742114_1290 (size=503880) 2024-12-16T19:46:24,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742114_1290 (size=503880) 2024-12-16T19:46:24,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742115_1291 (size=4695811) 2024-12-16T19:46:24,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742115_1291 (size=4695811) 2024-12-16T19:46:24,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742115_1291 (size=4695811) 2024-12-16T19:46:24,430 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-16T19:46:24,432 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-16T19:46:24,434 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=33.2 K 2024-12-16T19:46:24,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742116_1292 (size=714) 2024-12-16T19:46:24,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742116_1292 (size=714) 2024-12-16T19:46:24,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742116_1292 (size=714) 2024-12-16T19:46:24,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742117_1293 (size=15) 2024-12-16T19:46:24,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742117_1293 (size=15) 2024-12-16T19:46:24,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742117_1293 (size=15) 2024-12-16T19:46:24,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742118_1294 (size=304928) 2024-12-16T19:46:24,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742118_1294 (size=304928) 2024-12-16T19:46:24,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742118_1294 (size=304928) 2024-12-16T19:46:25,577 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-16T19:46:25,577 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T19:46:25,583 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0004_000001 (auth:SIMPLE) from 127.0.0.1:58002 2024-12-16T19:46:25,616 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_2/usercache/jenkins/appcache/application_1734378249643_0004/container_1734378249643_0004_01_000001/launch_container.sh] 2024-12-16T19:46:25,616 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_2/usercache/jenkins/appcache/application_1734378249643_0004/container_1734378249643_0004_01_000001/container_tokens] 2024-12-16T19:46:25,616 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_2/usercache/jenkins/appcache/application_1734378249643_0004/container_1734378249643_0004_01_000001/sysfs] 2024-12-16T19:46:26,389 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0005_000001 (auth:SIMPLE) from 127.0.0.1:36902 2024-12-16T19:46:32,869 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0005_000001 (auth:SIMPLE) from 127.0.0.1:36442 2024-12-16T19:46:33,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742119_1295 (size=350602) 2024-12-16T19:46:33,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742119_1295 (size=350602) 2024-12-16T19:46:33,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742119_1295 (size=350602) 2024-12-16T19:46:35,147 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0005_000001 (auth:SIMPLE) from 127.0.0.1:50908 2024-12-16T19:46:39,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742120_1296 (size=16925) 2024-12-16T19:46:39,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742120_1296 (size=16925) 2024-12-16T19:46:39,884 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742120_1296 (size=16925) 2024-12-16T19:46:39,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742121_1297 (size=462) 2024-12-16T19:46:39,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742121_1297 (size=462) 2024-12-16T19:46:39,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742121_1297 (size=462) 2024-12-16T19:46:39,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742122_1298 (size=16925) 2024-12-16T19:46:39,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742122_1298 (size=16925) 2024-12-16T19:46:39,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742122_1298 (size=16925) 2024-12-16T19:46:39,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742123_1299 (size=350602) 2024-12-16T19:46:39,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742123_1299 (size=350602) 2024-12-16T19:46:39,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742123_1299 (size=350602) 2024-12-16T19:46:39,998 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0005_000001 (auth:SIMPLE) from 127.0.0.1:50916 2024-12-16T19:46:40,023 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_0/usercache/jenkins/appcache/application_1734378249643_0005/container_1734378249643_0005_01_000002/launch_container.sh] 2024-12-16T19:46:40,023 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_0/usercache/jenkins/appcache/application_1734378249643_0005/container_1734378249643_0005_01_000002/container_tokens] 2024-12-16T19:46:40,023 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_0/usercache/jenkins/appcache/application_1734378249643_0005/container_1734378249643_0005_01_000002/sysfs] 2024-12-16T19:46:41,705 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-16T19:46:41,705 INFO 
[Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-16T19:46:41,709 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports 2024-12-16T19:46:41,709 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-16T19:46:41,710 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-16T19:46:41,710 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-16T19:46:41,712 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-16T19:46:41,712 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-16T19:46:41,712 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@11dd3e06 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-16T19:46:41,712 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-16T19:46:41,712 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378359514/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-16T19:46:41,732 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testConsecutiveExports 2024-12-16T19:46:41,733 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-16T19:46:41,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=92, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testConsecutiveExports 2024-12-16T19:46:41,736 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378401735"}]},"ts":"1734378401735"} 2024-12-16T19:46:41,737 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in 
hbase:meta 2024-12-16T19:46:41,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-16T19:46:41,791 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-16T19:46:41,792 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-16T19:46:41,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=66fba5611c85f6777b8785e0032e7521, UNASSIGN}, {pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a80c77746132b3c3234c95a6bb030b86, UNASSIGN}] 2024-12-16T19:46:41,801 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a80c77746132b3c3234c95a6bb030b86, UNASSIGN 2024-12-16T19:46:41,801 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=66fba5611c85f6777b8785e0032e7521, UNASSIGN 2024-12-16T19:46:41,806 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=66fba5611c85f6777b8785e0032e7521, regionState=CLOSING, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:46:41,807 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=a80c77746132b3c3234c95a6bb030b86, regionState=CLOSING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:46:41,812 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:46:41,812 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=94, state=RUNNABLE; CloseRegionProcedure 66fba5611c85f6777b8785e0032e7521, server=7a6e4bf70377,45217,1734378236605}] 2024-12-16T19:46:41,813 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:46:41,813 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=95, state=RUNNABLE; CloseRegionProcedure a80c77746132b3c3234c95a6bb030b86, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:46:41,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-16T19:46:41,958 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 66fba5611c85f6777b8785e0032e7521, had cached 0 bytes from a total of 6512 2024-12-16T19:46:41,958 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region a80c77746132b3c3234c95a6bb030b86, had cached 0 bytes from a total of 14247 2024-12-16T19:46:41,968 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 
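A few entries above, ExportSnapshot finalizes the second copy of snaptb0-testConsecutiveExports, verifies the exported snapshot's integrity, and TestExportSnapshot lists the .snapshotinfo and data.manifest files on both the HDFS source and the local-filesystem target. That export is driven by the MapReduce-backed ExportSnapshot tool; the sketch below is a minimal invocation under stated assumptions: the target URI and mapper count are placeholders, the sketch class name is made up for illustration, and the -snapshot/-copy-to/-mappers option names follow the tool's documented usage, so they should be confirmed against the usage output of the HBase version actually on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Submit the export job; the "export split=0 size=33.2 K" entry and the
    // MapReduce container activity seen earlier in this log come from this tool.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/local-export-placeholder", // hypothetical target URI
        "-mappers", "4"                                      // hypothetical parallelism
    });
    System.exit(rc);
  }
}

Running the same invocation twice against two different targets is the "consecutive exports" pattern this test exercises.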
2024-12-16T19:46:41,969 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close 66fba5611c85f6777b8785e0032e7521 2024-12-16T19:46:41,969 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:46:41,969 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing 66fba5611c85f6777b8785e0032e7521, disabling compactions & flushes 2024-12-16T19:46:41,969 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. 2024-12-16T19:46:41,969 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. 2024-12-16T19:46:41,969 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. after waiting 0 ms 2024-12-16T19:46:41,969 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. 2024-12-16T19:46:41,970 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:46:41,971 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(124): Close a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:46:41,971 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:46:41,971 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1681): Closing a80c77746132b3c3234c95a6bb030b86, disabling compactions & flushes 2024-12-16T19:46:41,971 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. 2024-12-16T19:46:41,971 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. 2024-12-16T19:46:41,971 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. after waiting 0 ms 2024-12-16T19:46:41,971 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. 
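The two region closes above (66fba5611c85f6777b8785e0032e7521 and a80c77746132b3c3234c95a6bb030b86) are the server side of a single client call: the master's DisableTableProcedure (pid=92) spawns CloseTableRegionsProcedure, TransitRegionStateProcedure and CloseRegionProcedure children while the client keeps asking "Checking to see if procedure is done pid=92" until the table reaches DISABLED. A minimal sketch of that client side, assuming default connection settings and using the table name from this log:

import java.util.concurrent.Future;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      // Submits the DisableTableProcedure on the master; waiting on the future is
      // what produces the repeated "is procedure done" checks in the log.
      Future<Void> disable = admin.disableTableAsync(table);
      disable.get();
      System.out.println("disabled=" + admin.isTableDisabled(table));
    }
  }
}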
2024-12-16T19:46:41,978 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:46:41,982 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:46:41,983 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521. 2024-12-16T19:46:41,983 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for 66fba5611c85f6777b8785e0032e7521: 2024-12-16T19:46:41,985 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed 66fba5611c85f6777b8785e0032e7521 2024-12-16T19:46:41,987 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:46:41,987 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:46:41,988 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86. 
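On a clean close each region writes a sequence-id marker under its recovered.edits directory (here 9.seqid with newMaxSeqId=9); the sequence id is carried in the file name and tells a later open the highest sequence id already accounted for. The sketch below lists those markers with the plain Hadoop FileSystem API; the namenode address and encoded region name are copied from the entries above and would differ on any other run, and the class name is made up for illustration.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SeqIdMarkerSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Region directory taken from the log above; adjust host/port and region name per run.
    Path regionDir = new Path("hdfs://localhost:38709/user/jenkins/test-data/"
        + "2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/"
        + "66fba5611c85f6777b8785e0032e7521");
    try (FileSystem fs = FileSystem.newInstance(URI.create(regionDir.toString()), conf)) {
      for (FileStatus f : fs.listStatus(new Path(regionDir, "recovered.edits"))) {
        // Expect name-only markers such as 9.seqid after a clean close.
        System.out.println(f.getPath().getName() + " len=" + f.getLen());
      }
    }
  }
}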
2024-12-16T19:46:41,988 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1635): Region close journal for a80c77746132b3c3234c95a6bb030b86: 2024-12-16T19:46:41,990 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=66fba5611c85f6777b8785e0032e7521, regionState=CLOSED 2024-12-16T19:46:41,991 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(170): Closed a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:46:41,992 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=a80c77746132b3c3234c95a6bb030b86, regionState=CLOSED 2024-12-16T19:46:41,995 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=94 2024-12-16T19:46:41,995 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=94, state=SUCCESS; CloseRegionProcedure 66fba5611c85f6777b8785e0032e7521, server=7a6e4bf70377,45217,1734378236605 in 180 msec 2024-12-16T19:46:41,996 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=95 2024-12-16T19:46:41,996 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=95, state=SUCCESS; CloseRegionProcedure a80c77746132b3c3234c95a6bb030b86, server=7a6e4bf70377,42741,1734378236851 in 181 msec 2024-12-16T19:46:41,999 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=93 2024-12-16T19:46:41,999 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=a80c77746132b3c3234c95a6bb030b86, UNASSIGN in 202 msec 2024-12-16T19:46:42,000 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=66fba5611c85f6777b8785e0032e7521, UNASSIGN in 201 msec 2024-12-16T19:46:42,002 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-12-16T19:46:42,002 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 208 msec 2024-12-16T19:46:42,004 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378402004"}]},"ts":"1734378402004"} 2024-12-16T19:46:42,006 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-16T19:46:42,016 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-12-16T19:46:42,018 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, state=SUCCESS; DisableTableProcedure table=testtb-testConsecutiveExports in 284 msec 2024-12-16T19:46:42,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-16T19:46:42,053 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports, procId: 92 completed 2024-12-16T19:46:42,057 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-16T19:46:42,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-16T19:46:42,060 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-16T19:46:42,061 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=98, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-16T19:46:42,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-16T19:46:42,071 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-16T19:46:42,074 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521 2024-12-16T19:46:42,079 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:46:42,082 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86/recovered.edits] 2024-12-16T19:46:42,094 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521/recovered.edits] 2024-12-16T19:46:42,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-16T19:46:42,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-16T19:46:42,116 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-16T19:46:42,118 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-16T19:46:42,122 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-16T19:46:42,122 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-16T19:46:42,122 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-16T19:46:42,124 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86/cf/ec3c027bea8c4f12a03c4264584a9c1f to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86/cf/ec3c027bea8c4f12a03c4264584a9c1f 2024-12-16T19:46:42,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:46:42,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-16T19:46:42,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-16T19:46:42,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:46:42,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:46:42,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-16T19:46:42,126 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:46:42,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-16T19:46:42,130 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data null 2024-12-16T19:46:42,130 INFO [zk-permission-watcher-pool-0 
{}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-16T19:46:42,147 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86/recovered.edits/9.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86/recovered.edits/9.seqid 2024-12-16T19:46:42,147 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:46:42,150 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521/cf/928454092f0945a7ba5b23d09071c0b4 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521/cf/928454092f0945a7ba5b23d09071c0b4 2024-12-16T19:46:42,155 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521/recovered.edits/9.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521/recovered.edits/9.seqid 2024-12-16T19:46:42,156 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testConsecutiveExports/66fba5611c85f6777b8785e0032e7521 2024-12-16T19:46:42,157 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-16T19:46:42,157 DEBUG [PEWorker-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-12-16T19:46:42,158 DEBUG [PEWorker-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf] 2024-12-16T19:46:42,193 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241216fc9c7ed2dc144dc3acfa3c676715876b_a80c77746132b3c3234c95a6bb030b86 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241216fc9c7ed2dc144dc3acfa3c676715876b_a80c77746132b3c3234c95a6bb030b86 2024-12-16T19:46:42,196 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241216cfe544581e014a35a5a909fe64bea940_66fba5611c85f6777b8785e0032e7521 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241216cfe544581e014a35a5a909fe64bea940_66fba5611c85f6777b8785e0032e7521 2024-12-16T19:46:42,198 DEBUG [PEWorker-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-12-16T19:46:42,202 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=98, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-16T19:46:42,209 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-16T19:46:42,222 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-16T19:46:42,224 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=98, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-16T19:46:42,224 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testConsecutiveExports' from region states. 2024-12-16T19:46:42,224 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378402224"}]},"ts":"9223372036854775807"} 2024-12-16T19:46:42,224 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378402224"}]},"ts":"9223372036854775807"} 2024-12-16T19:46:42,227 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-16T19:46:42,227 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 66fba5611c85f6777b8785e0032e7521, NAME => 'testtb-testConsecutiveExports,,1734378356526.66fba5611c85f6777b8785e0032e7521.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => a80c77746132b3c3234c95a6bb030b86, NAME => 'testtb-testConsecutiveExports,1,1734378356526.a80c77746132b3c3234c95a6bb030b86.', STARTKEY => '1', ENDKEY => ''}] 2024-12-16T19:46:42,227 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testConsecutiveExports' as deleted. 
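At this point the region and mob directories have been moved to the archive, the two region rows and the table-state row are deleted from hbase:meta, and the table descriptor is about to be removed; what follows below is the completion of DeleteTableProcedure (pid=98) and the deletion of the two snapshots. The client-side counterpart is a handful of Admin calls, sketched here with the same assumptions as above (default connection, names taken from this log, illustrative class name):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableAndSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      // Runs the DeleteTableProcedure seen above: HFiles are archived, META rows
      // and the descriptor removed. The table must already be disabled.
      admin.deleteTable(table);
      // Matches the two "delete name: ..." snapshot requests logged right after.
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}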
2024-12-16T19:46:42,227 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734378402227"}]},"ts":"9223372036854775807"} 2024-12-16T19:46:42,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-16T19:46:42,239 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testConsecutiveExports state from META 2024-12-16T19:46:42,254 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=98, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-16T19:46:42,256 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; DeleteTableProcedure table=testtb-testConsecutiveExports in 197 msec 2024-12-16T19:46:42,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-16T19:46:42,435 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports, procId: 98 completed 2024-12-16T19:46:42,443 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" 2024-12-16T19:46:42,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-16T19:46:42,447 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" 2024-12-16T19:46:42,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-16T19:46:42,482 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=791 (was 781) Potentially hanging thread: process reaper (pid 67564) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4341 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) 
app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/7a6e4bf70377:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
hconnection-0x736823cc-shared-pool-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-25 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1422999152) connection to localhost/127.0.0.1:44609 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_526348107_1 at /127.0.0.1:41496 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:41536 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/7a6e4bf70377:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44609 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_526348107_1 at /127.0.0.1:44470 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-24 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:55042 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=801 (was 804), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1042 (was 1093), ProcessCount=17 (was 17), AvailableMemoryMB=2121 (was 2058) - AvailableMemoryMB LEAK? - 2024-12-16T19:46:42,482 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=791 is superior to 500 2024-12-16T19:46:42,518 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=791, OpenFileDescriptor=801, MaxFileDescriptor=1048576, SystemLoadAverage=1042, ProcessCount=17, AvailableMemoryMB=2103 2024-12-16T19:46:42,518 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=791 is superior to 500 2024-12-16T19:46:42,521 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T19:46:42,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:46:42,525 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T19:46:42,525 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 99 2024-12-16T19:46:42,527 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T19:46:42,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-16T19:46:42,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742124_1300 (size=458) 2024-12-16T19:46:42,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742124_1300 (size=458) 2024-12-16T19:46:42,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742124_1300 (size=458) 2024-12-16T19:46:42,578 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => b363d90a34b23056d96e4dd25b341ff7, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:46:42,582 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 84a3ec9f16b896a68170287625bfe495, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:46:42,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-16T19:46:42,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742125_1301 (size=83) 2024-12-16T19:46:42,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742125_1301 (size=83) 2024-12-16T19:46:42,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742125_1301 (size=83) 2024-12-16T19:46:42,656 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(894): Instantiated 
testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:46:42,656 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1681): Closing b363d90a34b23056d96e4dd25b341ff7, disabling compactions & flushes 2024-12-16T19:46:42,656 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. 2024-12-16T19:46:42,657 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. 2024-12-16T19:46:42,657 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. after waiting 0 ms 2024-12-16T19:46:42,657 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. 2024-12-16T19:46:42,657 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. 2024-12-16T19:46:42,657 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1635): Region close journal for b363d90a34b23056d96e4dd25b341ff7: 2024-12-16T19:46:42,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742126_1302 (size=83) 2024-12-16T19:46:42,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742126_1302 (size=83) 2024-12-16T19:46:42,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742126_1302 (size=83) 2024-12-16T19:46:42,678 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:46:42,678 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1681): Closing 84a3ec9f16b896a68170287625bfe495, disabling compactions & flushes 2024-12-16T19:46:42,678 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. 
2024-12-16T19:46:42,678 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. 2024-12-16T19:46:42,678 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. after waiting 0 ms 2024-12-16T19:46:42,678 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. 2024-12-16T19:46:42,678 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. 2024-12-16T19:46:42,678 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1635): Region close journal for 84a3ec9f16b896a68170287625bfe495: 2024-12-16T19:46:42,679 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T19:46:42,680 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1734378402679"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378402679"}]},"ts":"1734378402679"} 2024-12-16T19:46:42,680 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1734378402679"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378402679"}]},"ts":"1734378402679"} 2024-12-16T19:46:42,682 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
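For reference, the table descriptor the master is materializing in this CreateTableProcedure maps to a single client-side create call, roughly as sketched below. This is a minimal illustration assuming the HBase 2.x Java client API (TableDescriptorBuilder, ColumnFamilyDescriptorBuilder, Admin.createTable); the class name CreateMobTableSketch is illustrative, and the single split key "1" is chosen to match the two regions ('' -> '1' and '1' -> '') created above. It is not the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
      // Single 'cf' family with MOB enabled, MOB_THRESHOLD=0 and VERSIONS=1,
      // mirroring the descriptor printed by HMaster above.
      TableDescriptorBuilder desc = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)
              .build());
      // One split key ("1") yields the two regions seen in the log.
      admin.createTable(desc.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}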
2024-12-16T19:46:42,683 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T19:46:42,683 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378402683"}]},"ts":"1734378402683"} 2024-12-16T19:46:42,685 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-16T19:46:42,755 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {7a6e4bf70377=0} racks are {/default-rack=0} 2024-12-16T19:46:42,757 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T19:46:42,757 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T19:46:42,757 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T19:46:42,757 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T19:46:42,757 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T19:46:42,757 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T19:46:42,757 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T19:46:42,758 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=b363d90a34b23056d96e4dd25b341ff7, ASSIGN}, {pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=84a3ec9f16b896a68170287625bfe495, ASSIGN}] 2024-12-16T19:46:42,759 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=84a3ec9f16b896a68170287625bfe495, ASSIGN 2024-12-16T19:46:42,759 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=b363d90a34b23056d96e4dd25b341ff7, ASSIGN 2024-12-16T19:46:42,760 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=84a3ec9f16b896a68170287625bfe495, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42741,1734378236851; forceNewPlan=false, retain=false 2024-12-16T19:46:42,761 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=b363d90a34b23056d96e4dd25b341ff7, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42861,1734378237024; 
forceNewPlan=false, retain=false 2024-12-16T19:46:42,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-16T19:46:42,911 INFO [7a6e4bf70377:45883 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T19:46:42,911 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=84a3ec9f16b896a68170287625bfe495, regionState=OPENING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:46:42,912 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=b363d90a34b23056d96e4dd25b341ff7, regionState=OPENING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:46:42,915 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=100, state=RUNNABLE; OpenRegionProcedure b363d90a34b23056d96e4dd25b341ff7, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:46:42,922 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=101, state=RUNNABLE; OpenRegionProcedure 84a3ec9f16b896a68170287625bfe495, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:46:43,068 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:46:43,071 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. 2024-12-16T19:46:43,072 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7285): Opening region: {ENCODED => b363d90a34b23056d96e4dd25b341ff7, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T19:46:43,072 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. service=AccessControlService 2024-12-16T19:46:43,072 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-16T19:46:43,072 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:46:43,072 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:46:43,072 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7327): checking encryption for b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:46:43,072 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7330): checking classloading for b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:46:43,074 INFO [StoreOpener-b363d90a34b23056d96e4dd25b341ff7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:46:43,074 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:46:43,075 INFO [StoreOpener-b363d90a34b23056d96e4dd25b341ff7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b363d90a34b23056d96e4dd25b341ff7 columnFamilyName cf 2024-12-16T19:46:43,076 DEBUG [StoreOpener-b363d90a34b23056d96e4dd25b341ff7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:46:43,076 INFO [StoreOpener-b363d90a34b23056d96e4dd25b341ff7-1 {}] regionserver.HStore(327): Store=b363d90a34b23056d96e4dd25b341ff7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:46:43,077 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:46:43,078 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:46:43,080 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1085): writing seq id for b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:46:43,087 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. 2024-12-16T19:46:43,087 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7285): Opening region: {ENCODED => 84a3ec9f16b896a68170287625bfe495, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T19:46:43,087 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. service=AccessControlService 2024-12-16T19:46:43,087 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:46:43,087 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 84a3ec9f16b896a68170287625bfe495 2024-12-16T19:46:43,087 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:46:43,088 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7327): checking encryption for 84a3ec9f16b896a68170287625bfe495 2024-12-16T19:46:43,088 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7330): checking classloading for 84a3ec9f16b896a68170287625bfe495 2024-12-16T19:46:43,089 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:46:43,089 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1102): Opened b363d90a34b23056d96e4dd25b341ff7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61048186, jitterRate=-0.09031113982200623}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:46:43,089 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] 
regionserver.HRegion(1001): Region open journal for b363d90a34b23056d96e4dd25b341ff7: 2024-12-16T19:46:43,090 INFO [StoreOpener-84a3ec9f16b896a68170287625bfe495-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 84a3ec9f16b896a68170287625bfe495 2024-12-16T19:46:43,090 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7., pid=102, masterSystemTime=1734378403068 2024-12-16T19:46:43,092 INFO [StoreOpener-84a3ec9f16b896a68170287625bfe495-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 84a3ec9f16b896a68170287625bfe495 columnFamilyName cf 2024-12-16T19:46:43,093 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. 2024-12-16T19:46:43,093 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. 
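At this point the first region (b363d90a34b23056d96e4dd25b341ff7) has been opened on 7a6e4bf70377,42861 and its assignment is being recorded in hbase:meta. Once both regions report OPEN, their placement can be confirmed from the client side, roughly as in the hedged sketch below; it assumes the HBase 2.x RegionLocator API, and the class/method names and the Connection parameter are illustrative, not the test's own code.

import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionLocationSketch {
  // Assumes an already-open Connection, e.g. from ConnectionFactory.createConnection().
  static void printRegionLocations(Connection conn) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      // Expect two entries once both regions are OPEN, e.g.
      // b363d90a... on 7a6e4bf70377,42861 and 84a3ec9f... on 7a6e4bf70377,42741.
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}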
2024-12-16T19:46:43,093 DEBUG [StoreOpener-84a3ec9f16b896a68170287625bfe495-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:46:43,094 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=b363d90a34b23056d96e4dd25b341ff7, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:46:43,095 INFO [StoreOpener-84a3ec9f16b896a68170287625bfe495-1 {}] regionserver.HStore(327): Store=84a3ec9f16b896a68170287625bfe495/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:46:43,097 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495 2024-12-16T19:46:43,097 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495 2024-12-16T19:46:43,098 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=100 2024-12-16T19:46:43,098 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=100, state=SUCCESS; OpenRegionProcedure b363d90a34b23056d96e4dd25b341ff7, server=7a6e4bf70377,42861,1734378237024 in 181 msec 2024-12-16T19:46:43,100 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=b363d90a34b23056d96e4dd25b341ff7, ASSIGN in 340 msec 2024-12-16T19:46:43,100 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1085): writing seq id for 84a3ec9f16b896a68170287625bfe495 2024-12-16T19:46:43,103 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:46:43,103 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1102): Opened 84a3ec9f16b896a68170287625bfe495; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67027506, jitterRate=-0.0012123286724090576}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:46:43,103 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1001): Region open journal for 84a3ec9f16b896a68170287625bfe495: 2024-12-16T19:46:43,104 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2601): Post open deploy tasks for 
testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495., pid=103, masterSystemTime=1734378403074 2024-12-16T19:46:43,105 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. 2024-12-16T19:46:43,106 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. 2024-12-16T19:46:43,106 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=84a3ec9f16b896a68170287625bfe495, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:46:43,110 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=101 2024-12-16T19:46:43,111 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=101, state=SUCCESS; OpenRegionProcedure 84a3ec9f16b896a68170287625bfe495, server=7a6e4bf70377,42741,1734378236851 in 186 msec 2024-12-16T19:46:43,114 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=99 2024-12-16T19:46:43,115 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=84a3ec9f16b896a68170287625bfe495, ASSIGN in 353 msec 2024-12-16T19:46:43,115 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T19:46:43,115 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378403115"}]},"ts":"1734378403115"} 2024-12-16T19:46:43,117 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-16T19:46:43,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-16T19:46:43,132 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T19:46:43,133 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-16T19:46:43,152 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-16T19:46:43,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:46:43,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:46:43,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:46:43,163 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:46:43,172 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T19:46:43,172 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T19:46:43,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T19:46:43,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T19:46:43,174 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 651 msec 2024-12-16T19:46:43,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-16T19:46:43,633 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 99 completed 2024-12-16T19:46:43,637 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:46:43,637 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. 2024-12-16T19:46:43,637 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:46:43,680 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-16T19:46:43,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378403680 (current time:1734378403680). 
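The snapshot request just logged ({ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }) corresponds to a single Admin call on the client side, roughly as sketched below. This assumes the HBase 2.x client API (Admin.snapshot with a SnapshotDescription and SnapshotType.FLUSH); the class and method names are illustrative.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class EmptySnapshotSketch {
  // Assumes an already-open Admin handle, e.g. connection.getAdmin().
  static void takeEmptySnapshot(Admin admin) throws Exception {
    // A FLUSH-type snapshot of the freshly created (still empty) table,
    // matching the request logged above.
    admin.snapshot(new SnapshotDescription(
        "emptySnaptb0-testExportFileSystemStateWithMergeRegion",
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
        SnapshotType.FLUSH));
  }
}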
2024-12-16T19:46:43,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:46:43,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-16T19:46:43,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:46:43,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6ae2d7f3 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a0f72aa 2024-12-16T19:46:43,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57e37c2f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:46:43,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:46:43,739 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55114, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:46:43,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6ae2d7f3 to 127.0.0.1:62515 2024-12-16T19:46:43,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:46:43,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a826c23 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1bd9dffb 2024-12-16T19:46:43,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5717caab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:46:43,780 DEBUG [hconnection-0x6a5b9320-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:46:43,782 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55116, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:46:43,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:46:43,787 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50822, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:46:43,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): 
Close zookeeper connection 0x5a826c23 to 127.0.0.1:62515 2024-12-16T19:46:43,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:46:43,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-16T19:46:43,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T19:46:43,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-16T19:46:43,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-16T19:46:43,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-16T19:46:43,803 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:46:43,804 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:46:43,807 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:46:43,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742127_1303 (size=215) 2024-12-16T19:46:43,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742127_1303 (size=215) 2024-12-16T19:46:43,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742127_1303 (size=215) 2024-12-16T19:46:43,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-16T19:46:44,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-16T19:46:44,267 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, 
state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:46:44,267 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure b363d90a34b23056d96e4dd25b341ff7}, {pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 84a3ec9f16b896a68170287625bfe495}] 2024-12-16T19:46:44,268 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:46:44,268 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 84a3ec9f16b896a68170287625bfe495 2024-12-16T19:46:44,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-16T19:46:44,419 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:46:44,419 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:46:44,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42741 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-16T19:46:44,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-16T19:46:44,420 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. 2024-12-16T19:46:44,420 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. 2024-12-16T19:46:44,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2538): Flush status journal for 84a3ec9f16b896a68170287625bfe495: 2024-12-16T19:46:44,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for b363d90a34b23056d96e4dd25b341ff7: 2024-12-16T19:46:44,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 
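Pid 104 fans out into one SnapshotRegionProcedure per online region of the table (pids 105 and 106, encoded names b363d90a34b23056d96e4dd25b341ff7 and 84a3ec9f16b896a68170287625bfe495), each dispatched to the region server hosting that region. A small hedged sketch for relating those encoded names back to region boundaries, assuming an open Admin handle (the helper class is ours, not part of the test):

    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.RegionInfo;

    // Illustrative helper: lists the regions that the per-region snapshot
    // subprocedures above are executed against.
    class RegionLister {
      static void printRegions(Admin admin) throws java.io.IOException {
        List<RegionInfo> regions =
            admin.getRegions(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
        for (RegionInfo ri : regions) {
          System.out.println(ri.getEncodedName() + " -> " + ri.getRegionNameAsString());
        }
      }
    }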
2024-12-16T19:46:44,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-16T19:46:44,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T19:46:44,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T19:46:44,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:46:44,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:46:44,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T19:46:44,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T19:46:44,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742128_1304 (size=86) 2024-12-16T19:46:44,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742128_1304 (size=86) 2024-12-16T19:46:44,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742128_1304 (size=86) 2024-12-16T19:46:44,429 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. 
2024-12-16T19:46:44,429 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-16T19:46:44,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=106 2024-12-16T19:46:44,430 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 84a3ec9f16b896a68170287625bfe495 2024-12-16T19:46:44,430 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 84a3ec9f16b896a68170287625bfe495 2024-12-16T19:46:44,432 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=104, state=SUCCESS; SnapshotRegionProcedure 84a3ec9f16b896a68170287625bfe495 in 164 msec 2024-12-16T19:46:44,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742129_1305 (size=86) 2024-12-16T19:46:44,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742129_1305 (size=86) 2024-12-16T19:46:44,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742129_1305 (size=86) 2024-12-16T19:46:44,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. 
2024-12-16T19:46:44,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-16T19:46:44,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-16T19:46:44,447 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:46:44,447 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:46:44,466 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-16T19:46:44,466 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:46:44,466 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; SnapshotRegionProcedure b363d90a34b23056d96e4dd25b341ff7 in 181 msec 2024-12-16T19:46:44,470 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:46:44,471 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
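Besides the two data regions, the procedure also runs a SNAPSHOT_SNAPSHOT_MOB_REGION step (the MobRegionSnapshotPool entries above), and later lines show HMobStore flushing files under mobdir/, so the cf family of this table is MOB-enabled. A hedged sketch of declaring such a family; the table name and threshold here are arbitrary examples, and judging by the mobdir flushes of very small cells later in the log, the test itself evidently uses a much lower threshold:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative only: a table whose 'cf' family stores large cells as MOB files,
    // which is what produces the mobdir/ flushes and the MOB-region snapshot step.
    class MobTableSketch {
      static void createMobTable(Admin admin) throws java.io.IOException {
        admin.createTable(TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example-mob-table"))   // example name, not the test's
            .setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("cf"))
                .setMobEnabled(true)          // route cells above the threshold into MOB files
                .setMobThreshold(100 * 1024)  // assumed threshold: 100 KB
                .build())
            .build());
      }
    }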
2024-12-16T19:46:44,471 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:46:44,471 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:46:44,472 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-16T19:46:44,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742130_1306 (size=78) 2024-12-16T19:46:44,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742130_1306 (size=78) 2024-12-16T19:46:44,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742130_1306 (size=78) 2024-12-16T19:46:44,533 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:46:44,533 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T19:46:44,538 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T19:46:44,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742131_1307 (size=713) 2024-12-16T19:46:44,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742131_1307 (size=713) 2024-12-16T19:46:44,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742131_1307 (size=713) 2024-12-16T19:46:44,610 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:46:44,619 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:46:44,620 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T19:46:44,621 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:46:44,621 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-16T19:46:44,622 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 830 msec 2024-12-16T19:46:44,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-16T19:46:44,906 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 104 completed 2024-12-16T19:46:44,915 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42741 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:46:44,919 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42861 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:46:44,934 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:46:44,934 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. 2024-12-16T19:46:44,934 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:46:44,954 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-16T19:46:44,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378404954 (current time:1734378404954). 
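After the empty snapshot completes (procId 104), the test writes rows into both regions and the region servers log "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." — the warning a region server emits when mutations arrive with write-ahead logging skipped. A minimal sketch of a put that triggers it, assuming an open Connection; the row key and value are placeholders:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    class SkipWalPutSketch {
      static void putWithoutWal(Connection conn) throws java.io.IOException {
        try (Table table =
            conn.getTable(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))) {
          Put put = new Put(Bytes.toBytes("row-0"));               // placeholder row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),   // cf:q matches the flushed cells in the log
              Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL); // skipping the WAL is what produces the HRegion(8254) warning
          table.put(put);
        }
      }
    }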
2024-12-16T19:46:44,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:46:44,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-16T19:46:44,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:46:44,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f29f257 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@295db001 2024-12-16T19:46:45,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24ee4ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:46:45,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:46:45,002 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51238, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:46:45,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f29f257 to 127.0.0.1:62515 2024-12-16T19:46:45,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:46:45,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x35a52270 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@71bc417d 2024-12-16T19:46:45,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4202aa9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:46:45,024 DEBUG [hconnection-0x1703f596-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:46:45,025 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51248, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:46:45,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:46:45,029 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51648, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:46:45,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x35a52270 to 127.0.0.1:62515 2024-12-16T19:46:45,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:46:45,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-16T19:46:45,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T19:46:45,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=107, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-16T19:46:45,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107 2024-12-16T19:46:45,033 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:46:45,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-16T19:46:45,034 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:46:45,038 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:46:45,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742132_1308 (size=210) 2024-12-16T19:46:45,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742132_1308 (size=210) 2024-12-16T19:46:45,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742132_1308 (size=210) 2024-12-16T19:46:45,105 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:46:45,106 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure b363d90a34b23056d96e4dd25b341ff7}, {pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 84a3ec9f16b896a68170287625bfe495}] 2024-12-16T19:46:45,108 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 84a3ec9f16b896a68170287625bfe495 2024-12-16T19:46:45,108 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:46:45,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-16T19:46:45,263 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:46:45,263 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:46:45,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=108 2024-12-16T19:46:45,264 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. 2024-12-16T19:46:45,265 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2837): Flushing b363d90a34b23056d96e4dd25b341ff7 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-16T19:46:45,266 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42741 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=109 2024-12-16T19:46:45,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. 
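Unlike the empty snapshot, this one finds data in the memstores: because the snapshot type is FLUSH, each SnapshotRegionCallable first flushes its region (the "Flushing ... 1/1 column families" lines) so the snapshot can reference on-disk hfiles. Roughly the same effect as an explicit table flush, sketched here with an assumed Admin handle:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    // Illustrative equivalent of the per-region flushes the snapshot performs above.
    class FlushSketch {
      static void flushTable(Admin admin) throws java.io.IOException {
        admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
      }
    }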
2024-12-16T19:46:45,266 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 84a3ec9f16b896a68170287625bfe495 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-16T19:46:45,311 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216b3864d05bbb44974a9665f0b96f4ba33_b363d90a34b23056d96e4dd25b341ff7 is 71, key is 0a44212e82b17203cf57631a9e3ef607/cf:q/1734378404919/Put/seqid=0 2024-12-16T19:46:45,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-16T19:46:45,342 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241216aab8e837b9d94726916820b28f1b6d48_84a3ec9f16b896a68170287625bfe495 is 71, key is 1e1ef61af8bafbae83bdf204209f39d9/cf:q/1734378404914/Put/seqid=0 2024-12-16T19:46:45,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742133_1309 (size=5032) 2024-12-16T19:46:45,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742133_1309 (size=5032) 2024-12-16T19:46:45,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742133_1309 (size=5032) 2024-12-16T19:46:45,368 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:46:45,379 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216b3864d05bbb44974a9665f0b96f4ba33_b363d90a34b23056d96e4dd25b341ff7 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241216b3864d05bbb44974a9665f0b96f4ba33_b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:46:45,381 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7/.tmp/cf/709d837400894cd48dec66c0564e28ce, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=b363d90a34b23056d96e4dd25b341ff7] 2024-12-16T19:46:45,382 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7/.tmp/cf/709d837400894cd48dec66c0564e28ce is 224, key is 08f825d09907cc1817eb48103a76cac3c/cf:q/1734378404919/Put/seqid=0 2024-12-16T19:46:45,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742134_1310 (size=8242) 2024-12-16T19:46:45,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742134_1310 (size=8242) 2024-12-16T19:46:45,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742134_1310 (size=8242) 2024-12-16T19:46:45,434 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:46:45,473 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241216aab8e837b9d94726916820b28f1b6d48_84a3ec9f16b896a68170287625bfe495 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241216aab8e837b9d94726916820b28f1b6d48_84a3ec9f16b896a68170287625bfe495 2024-12-16T19:46:45,482 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495/.tmp/cf/da61126c3eee46e6b7755254d15c95db, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=84a3ec9f16b896a68170287625bfe495] 2024-12-16T19:46:45,483 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495/.tmp/cf/da61126c3eee46e6b7755254d15c95db is 224, key is 129a709f5d2ea7354fa080152694f4bdf/cf:q/1734378404914/Put/seqid=0 2024-12-16T19:46:45,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742135_1311 (size=5754) 2024-12-16T19:46:45,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742135_1311 (size=5754) 2024-12-16T19:46:45,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742135_1311 (size=5754) 2024-12-16T19:46:45,534 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp 
file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7/.tmp/cf/709d837400894cd48dec66c0564e28ce 2024-12-16T19:46:45,561 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7/.tmp/cf/709d837400894cd48dec66c0564e28ce as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7/cf/709d837400894cd48dec66c0564e28ce 2024-12-16T19:46:45,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742136_1312 (size=15939) 2024-12-16T19:46:45,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742136_1312 (size=15939) 2024-12-16T19:46:45,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742136_1312 (size=15939) 2024-12-16T19:46:45,598 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495/.tmp/cf/da61126c3eee46e6b7755254d15c95db 2024-12-16T19:46:45,603 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7/cf/709d837400894cd48dec66c0564e28ce, entries=2, sequenceid=6, filesize=5.6 K 2024-12-16T19:46:45,622 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(3040): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for b363d90a34b23056d96e4dd25b341ff7 in 357ms, sequenceid=6, compaction requested=false 2024-12-16T19:46:45,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-16T19:46:45,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2538): Flush status journal for b363d90a34b23056d96e4dd25b341ff7: 2024-12-16T19:46:45,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 
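The flush path is visible above: the memstore is written to a .tmp hfile, committed (renamed) into the region's cf directory, and the MOB cells land under the table's mobdir area. A hedged sketch for listing the committed store files with the plain Hadoop FileSystem API; the NameNode port and test-data directory are specific to this run and would differ elsewhere:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListStoreFilesSketch {
      public static void main(String[] args) throws Exception {
        // Path taken from the log; adjust host/port and the test-data directory for your cluster.
        Path cfDir = new Path("hdfs://localhost:38709/user/jenkins/test-data/"
            + "2169dc9f-8777-6451-483f-054f4973cf7c/data/default/"
            + "testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7/cf");
        FileSystem fs = cfDir.getFileSystem(new Configuration());
        for (FileStatus status : fs.listStatus(cfDir)) {
          // Expect e.g. 709d837400894cd48dec66c0564e28ce (~5.6 K) per the HStore "Added" line above.
          System.out.println(status.getPath().getName() + "\t" + status.getLen() + " bytes");
        }
      }
    }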
2024-12-16T19:46:45,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T19:46:45,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:46:45,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7/cf/709d837400894cd48dec66c0564e28ce] hfiles 2024-12-16T19:46:45,623 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7/cf/709d837400894cd48dec66c0564e28ce for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T19:46:45,632 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495/.tmp/cf/da61126c3eee46e6b7755254d15c95db as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495/cf/da61126c3eee46e6b7755254d15c95db 2024-12-16T19:46:45,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-16T19:46:45,659 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495/cf/da61126c3eee46e6b7755254d15c95db, entries=48, sequenceid=6, filesize=15.6 K 2024-12-16T19:46:45,907 INFO [AsyncFSWAL-0-hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c-prefix:7a6e4bf70377,42741,1734378236851 {}] wal.AbstractFSWAL(1183): Slow sync cost: 246 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40033,DS-496d9931-f607-4803-8fbb-64e05aba6f02,DISK], DatanodeInfoWithStorage[127.0.0.1:39241,DS-4fb865bf-4391-49ae-b31e-3b62fb3795cf,DISK], DatanodeInfoWithStorage[127.0.0.1:43893,DS-a5be4d6e-06b0-4d3e-af01-bc15c82cbee1,DISK]] 2024-12-16T19:46:45,910 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 84a3ec9f16b896a68170287625bfe495 in 644ms, sequenceid=6, compaction requested=false 2024-12-16T19:46:45,910 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 84a3ec9f16b896a68170287625bfe495: 2024-12-16T19:46:45,910 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-16T19:46:45,910 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T19:46:45,910 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:46:45,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495/cf/da61126c3eee46e6b7755254d15c95db] hfiles 2024-12-16T19:46:45,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495/cf/da61126c3eee46e6b7755254d15c95db for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T19:46:45,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742137_1313 (size=125) 2024-12-16T19:46:45,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742137_1313 (size=125) 2024-12-16T19:46:45,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742137_1313 (size=125) 2024-12-16T19:46:45,941 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. 
2024-12-16T19:46:45,941 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=108 2024-12-16T19:46:45,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=108 2024-12-16T19:46:45,941 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:46:45,942 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:46:45,943 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, ppid=107, state=SUCCESS; SnapshotRegionProcedure b363d90a34b23056d96e4dd25b341ff7 in 836 msec 2024-12-16T19:46:45,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742138_1314 (size=125) 2024-12-16T19:46:45,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742138_1314 (size=125) 2024-12-16T19:46:45,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742138_1314 (size=125) 2024-12-16T19:46:45,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. 
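With both region procedures for snaptb0-testExportFileSystemStateWithMergeRegion finished, the remaining steps (MOB region, consolidate, verify, complete) follow below. Once the snapshot is complete, its manifest and referenced hfiles can be inspected with the SnapshotInfo tool; a hedged sketch of driving it from Java (flag names follow the tool's usage text and may vary between versions):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.SnapshotInfo;
    import org.apache.hadoop.util.ToolRunner;

    public class InspectSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Prints the store files the snapshot references plus summary stats.
        int rc = ToolRunner.run(HBaseConfiguration.create(), new SnapshotInfo(),
            new String[] { "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion",
                           "-files", "-stats" });
        System.exit(rc);
      }
    }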
2024-12-16T19:46:45,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-16T19:46:45,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-16T19:46:45,947 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 84a3ec9f16b896a68170287625bfe495 2024-12-16T19:46:45,947 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 84a3ec9f16b896a68170287625bfe495 2024-12-16T19:46:45,950 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=107 2024-12-16T19:46:45,950 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:46:45,950 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=107, state=SUCCESS; SnapshotRegionProcedure 84a3ec9f16b896a68170287625bfe495 in 842 msec 2024-12-16T19:46:45,951 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:46:45,955 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
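The second snapshot's MOB region step mirrors the empty one, except that the lines below add references to the two mobdir hfiles flushed earlier. Since this test exercises snapshot export, the natural next step once pid=107 finishes is ExportSnapshot, which copies the snapshot manifest and referenced hfiles to another filesystem via a MapReduce job. A hedged sketch; the destination URI and mapper count are assumptions, and flag names follow the HBase snapshot-export documentation:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Copies snapshot metadata plus the referenced hfiles/MOB files to the target root dir.
        int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
            new String[] { "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion",
                           "-copy-to", "hdfs://backup-nn:8020/hbase",   // assumed destination
                           "-mappers", "2" });                          // assumed parallelism
        System.exit(rc);
      }
    }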
2024-12-16T19:46:45,955 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:46:45,955 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:46:45,959 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241216aab8e837b9d94726916820b28f1b6d48_84a3ec9f16b896a68170287625bfe495, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241216b3864d05bbb44974a9665f0b96f4ba33_b363d90a34b23056d96e4dd25b341ff7] hfiles 2024-12-16T19:46:45,959 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241216aab8e837b9d94726916820b28f1b6d48_84a3ec9f16b896a68170287625bfe495 2024-12-16T19:46:45,959 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241216b3864d05bbb44974a9665f0b96f4ba33_b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:46:45,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742139_1315 (size=309) 2024-12-16T19:46:45,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742139_1315 (size=309) 2024-12-16T19:46:45,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742139_1315 (size=309) 2024-12-16T19:46:45,976 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:46:45,976 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T19:46:45,976 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T19:46:45,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742140_1316 (size=1023) 2024-12-16T19:46:46,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39241 is added to blk_1073742140_1316 (size=1023) 2024-12-16T19:46:46,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742140_1316 (size=1023) 2024-12-16T19:46:46,004 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:46:46,015 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:46:46,016 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T19:46:46,018 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:46:46,018 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107 2024-12-16T19:46:46,030 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 986 msec 2024-12-16T19:46:46,039 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:46:46,039 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-16T19:46:46,040 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-16T19:46:46,102 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0005_000001 (auth:SIMPLE) from 127.0.0.1:34376 2024-12-16T19:46:46,121 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_1/usercache/jenkins/appcache/application_1734378249643_0005/container_1734378249643_0005_01_000001/launch_container.sh] 2024-12-16T19:46:46,121 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_1/usercache/jenkins/appcache/application_1734378249643_0005/container_1734378249643_0005_01_000001/container_tokens] 2024-12-16T19:46:46,121 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_1/usercache/jenkins/appcache/application_1734378249643_0005/container_1734378249643_0005_01_000001/sysfs] 2024-12-16T19:46:46,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-16T19:46:46,145 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 107 completed 2024-12-16T19:46:46,177 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-16T19:46:46,182 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51262, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-16T19:46:46,183 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42741 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-16T19:46:46,184 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-16T19:46:46,186 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34236, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-16T19:46:46,186 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42861 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-16T19:46:46,187 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-16T19:46:46,193 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51650, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-16T19:46:46,193 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-16T19:46:46,197 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 
'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T19:46:46,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:46:46,199 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T19:46:46,199 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:46:46,201 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 110 2024-12-16T19:46:46,202 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T19:46:46,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-16T19:46:46,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742141_1317 (size=399) 2024-12-16T19:46:46,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742141_1317 (size=399) 2024-12-16T19:46:46,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742141_1317 (size=399) 2024-12-16T19:46:46,231 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e659069ce8fd38dd0b5ece8b797d8834, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:46:46,243 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 8a1a24737a4dc7838eb30fb713491018, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018.', STARTKEY => 
'2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:46:46,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742142_1318 (size=85) 2024-12-16T19:46:46,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742142_1318 (size=85) 2024-12-16T19:46:46,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742142_1318 (size=85) 2024-12-16T19:46:46,275 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:46:46,275 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1681): Closing e659069ce8fd38dd0b5ece8b797d8834, disabling compactions & flushes 2024-12-16T19:46:46,275 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834. 2024-12-16T19:46:46,275 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834. 2024-12-16T19:46:46,275 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834. after waiting 0 ms 2024-12-16T19:46:46,275 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834. 2024-12-16T19:46:46,276 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834. 
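The SnapshotProcedure above (pid=107) walks SNAPSHOT_CONSOLIDATE_SNAPSHOT → SNAPSHOT_VERIFIER_SNAPSHOT → SNAPSHOT_COMPLETE_SNAPSHOT, moves the snapshot out of .hbase-snapshot/.tmp, and the client then reports the SNAPSHOT operation for procId 107 as completed. As a hedged illustration only (this is not the test's actual code; the connection boilerplate is assumed), such a flush-type snapshot could be requested through the HBase client Admin API roughly like this:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Assumption: hbase-site.xml on the classpath points at the test cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Names taken from the log above. For an enabled table this produces a
      // FLUSH-type snapshot (matching "type=FLUSH" in the procedure log), and
      // the call blocks until the master-side SnapshotProcedure completes.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
    }
  }
}
```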
2024-12-16T19:46:46,276 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1635): Region close journal for e659069ce8fd38dd0b5ece8b797d8834: 2024-12-16T19:46:46,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742143_1319 (size=85) 2024-12-16T19:46:46,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742143_1319 (size=85) 2024-12-16T19:46:46,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742143_1319 (size=85) 2024-12-16T19:46:46,296 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:46:46,296 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1681): Closing 8a1a24737a4dc7838eb30fb713491018, disabling compactions & flushes 2024-12-16T19:46:46,296 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018. 2024-12-16T19:46:46,296 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018. 2024-12-16T19:46:46,296 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018. after waiting 0 ms 2024-12-16T19:46:46,296 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018. 2024-12-16T19:46:46,296 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018. 
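The CreateTableProcedure (pid=110) running here builds 'testtb-testExportFileSystemStateWithMergeRegion-1' with a single family 'cf' (VERSIONS => '1') and initializes two regions, ['', '2') and ['2', ''), i.e. a single split key of '2'. A minimal sketch of an equivalent create through the 2.x client API, assuming an already-open Admin handle:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  // Creates the pre-split table; 'admin' is assumed to be an open Admin handle.
  static void createMergeTestTable(Admin admin) throws IOException {
    TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
        // Single family 'cf' keeping one version, as in the descriptor logged above.
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)
            .build())
        .build();
    // One split key ('2') yields the two regions seen in the log: ['', '2') and ['2', '').
    admin.createTable(desc, new byte[][] { Bytes.toBytes("2") });
  }
}
```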
2024-12-16T19:46:46,296 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1635): Region close journal for 8a1a24737a4dc7838eb30fb713491018: 2024-12-16T19:46:46,299 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T19:46:46,299 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1734378406299"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378406299"}]},"ts":"1734378406299"} 2024-12-16T19:46:46,299 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1734378406299"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378406299"}]},"ts":"1734378406299"} 2024-12-16T19:46:46,302 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T19:46:46,304 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T19:46:46,304 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378406304"}]},"ts":"1734378406304"} 2024-12-16T19:46:46,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-16T19:46:46,307 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-16T19:46:46,376 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {7a6e4bf70377=0} racks are {/default-rack=0} 2024-12-16T19:46:46,378 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T19:46:46,378 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T19:46:46,378 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T19:46:46,378 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T19:46:46,378 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T19:46:46,378 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T19:46:46,378 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T19:46:46,379 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e659069ce8fd38dd0b5ece8b797d8834, ASSIGN}, {pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8a1a24737a4dc7838eb30fb713491018, ASSIGN}] 2024-12-16T19:46:46,382 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8a1a24737a4dc7838eb30fb713491018, ASSIGN 2024-12-16T19:46:46,382 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e659069ce8fd38dd0b5ece8b797d8834, ASSIGN 2024-12-16T19:46:46,389 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e659069ce8fd38dd0b5ece8b797d8834, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42741,1734378236851; forceNewPlan=false, retain=false 2024-12-16T19:46:46,389 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8a1a24737a4dc7838eb30fb713491018, ASSIGN; state=OFFLINE, location=7a6e4bf70377,45217,1734378236605; forceNewPlan=false, retain=false 2024-12-16T19:46:46,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-16T19:46:46,540 INFO [7a6e4bf70377:45883 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T19:46:46,540 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=8a1a24737a4dc7838eb30fb713491018, regionState=OPENING, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:46:46,540 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=e659069ce8fd38dd0b5ece8b797d8834, regionState=OPENING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:46:46,543 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=111, state=RUNNABLE; OpenRegionProcedure e659069ce8fd38dd0b5ece8b797d8834, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:46:46,545 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=114, ppid=112, state=RUNNABLE; OpenRegionProcedure 8a1a24737a4dc7838eb30fb713491018, server=7a6e4bf70377,45217,1734378236605}] 2024-12-16T19:46:46,697 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:46:46,699 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:46:46,702 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834. 
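After the balancer picks candidates, the two TransitRegionStateProcedures above dispatch OpenRegionProcedures to 7a6e4bf70377,42741 and 7a6e4bf70377,45217. Once the regions open, a client can observe the resulting placement through the RegionLocator; a small hedged sketch (method and variable names are illustrative, not taken from the test):

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionPlacementSketch {
  // Prints encoded-region-name -> server placement for the freshly created table.
  static void printPlacement(Connection conn) throws IOException {
    TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (RegionLocator locator = conn.getRegionLocator(name)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // e.g. e659069ce8fd38dd0b5ece8b797d8834 -> 7a6e4bf70377,42741,1734378236851
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
```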
2024-12-16T19:46:46,702 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7285): Opening region: {ENCODED => e659069ce8fd38dd0b5ece8b797d8834, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834.', STARTKEY => '', ENDKEY => '2'} 2024-12-16T19:46:46,703 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834. service=AccessControlService 2024-12-16T19:46:46,703 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018. 2024-12-16T19:46:46,703 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:46:46,703 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7285): Opening region: {ENCODED => 8a1a24737a4dc7838eb30fb713491018, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018.', STARTKEY => '2', ENDKEY => ''} 2024-12-16T19:46:46,703 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 e659069ce8fd38dd0b5ece8b797d8834 2024-12-16T19:46:46,703 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:46:46,703 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018. service=AccessControlService 2024-12-16T19:46:46,703 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7327): checking encryption for e659069ce8fd38dd0b5ece8b797d8834 2024-12-16T19:46:46,703 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7330): checking classloading for e659069ce8fd38dd0b5ece8b797d8834 2024-12-16T19:46:46,703 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
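Each region open above registers the AccessControlService coprocessor, and a few lines further down PermissionStorage records the table's "jenkins: RWXCA" entry being written and propagated to every region server through the /hbase/acl znode. A hedged sketch of issuing an equivalent table-level grant from a client (the test's own permission helpers are not visible in this log), assuming the AccessController coprocessor is enabled as it is here:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantSketch {
  // Grants READ/WRITE/EXEC/CREATE/ADMIN (RWXCA) on the table to user 'jenkins'.
  static void grantAll(Connection conn) throws Throwable {
    AccessControlClient.grant(conn,
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
        "jenkins",
        null,  // whole table: no specific column family
        null,  // no specific qualifier
        Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
        Permission.Action.CREATE, Permission.Action.ADMIN);
  }
}
```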
2024-12-16T19:46:46,703 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 8a1a24737a4dc7838eb30fb713491018 2024-12-16T19:46:46,703 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:46:46,704 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7327): checking encryption for 8a1a24737a4dc7838eb30fb713491018 2024-12-16T19:46:46,704 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7330): checking classloading for 8a1a24737a4dc7838eb30fb713491018 2024-12-16T19:46:46,705 INFO [StoreOpener-8a1a24737a4dc7838eb30fb713491018-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8a1a24737a4dc7838eb30fb713491018 2024-12-16T19:46:46,707 INFO [StoreOpener-8a1a24737a4dc7838eb30fb713491018-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8a1a24737a4dc7838eb30fb713491018 columnFamilyName cf 2024-12-16T19:46:46,707 DEBUG [StoreOpener-8a1a24737a4dc7838eb30fb713491018-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:46:46,708 INFO [StoreOpener-8a1a24737a4dc7838eb30fb713491018-1 {}] regionserver.HStore(327): Store=8a1a24737a4dc7838eb30fb713491018/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:46:46,709 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018 2024-12-16T19:46:46,709 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018 2024-12-16T19:46:46,712 DEBUG 
[RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1085): writing seq id for 8a1a24737a4dc7838eb30fb713491018 2024-12-16T19:46:46,722 INFO [StoreOpener-e659069ce8fd38dd0b5ece8b797d8834-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e659069ce8fd38dd0b5ece8b797d8834 2024-12-16T19:46:46,725 INFO [StoreOpener-e659069ce8fd38dd0b5ece8b797d8834-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e659069ce8fd38dd0b5ece8b797d8834 columnFamilyName cf 2024-12-16T19:46:46,725 DEBUG [StoreOpener-e659069ce8fd38dd0b5ece8b797d8834-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:46:46,726 INFO [StoreOpener-e659069ce8fd38dd0b5ece8b797d8834-1 {}] regionserver.HStore(327): Store=e659069ce8fd38dd0b5ece8b797d8834/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:46:46,727 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834 2024-12-16T19:46:46,728 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834 2024-12-16T19:46:46,728 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:46:46,728 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1102): Opened 8a1a24737a4dc7838eb30fb713491018; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73191714, jitterRate=0.09064152836799622}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:46:46,729 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1001): Region open 
journal for 8a1a24737a4dc7838eb30fb713491018: 2024-12-16T19:46:46,730 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018., pid=114, masterSystemTime=1734378406699 2024-12-16T19:46:46,733 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018. 2024-12-16T19:46:46,733 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018. 2024-12-16T19:46:46,733 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=8a1a24737a4dc7838eb30fb713491018, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:46:46,737 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1085): writing seq id for e659069ce8fd38dd0b5ece8b797d8834 2024-12-16T19:46:46,739 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=114, resume processing ppid=112 2024-12-16T19:46:46,741 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, ppid=112, state=SUCCESS; OpenRegionProcedure 8a1a24737a4dc7838eb30fb713491018, server=7a6e4bf70377,45217,1734378236605 in 191 msec 2024-12-16T19:46:46,743 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8a1a24737a4dc7838eb30fb713491018, ASSIGN in 360 msec 2024-12-16T19:46:46,751 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:46:46,752 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1102): Opened e659069ce8fd38dd0b5ece8b797d8834; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62729146, jitterRate=-0.06526288390159607}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:46:46,752 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1001): Region open journal for e659069ce8fd38dd0b5ece8b797d8834: 2024-12-16T19:46:46,753 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834., pid=113, masterSystemTime=1734378406696 2024-12-16T19:46:46,765 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2628): Finished post open deploy task for 
testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834. 2024-12-16T19:46:46,765 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834. 2024-12-16T19:46:46,766 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=e659069ce8fd38dd0b5ece8b797d8834, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:46:46,794 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=111 2024-12-16T19:46:46,794 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=111, state=SUCCESS; OpenRegionProcedure e659069ce8fd38dd0b5ece8b797d8834, server=7a6e4bf70377,42741,1734378236851 in 225 msec 2024-12-16T19:46:46,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-16T19:46:46,811 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-16T19:46:46,811 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e659069ce8fd38dd0b5ece8b797d8834, ASSIGN in 415 msec 2024-12-16T19:46:46,812 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T19:46:46,813 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378406812"}]},"ts":"1734378406812"} 2024-12-16T19:46:46,815 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-16T19:46:46,834 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T19:46:46,835 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-16T19:46:46,847 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-16T19:46:46,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:46:46,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:46:46,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, 
quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:46:46,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:46:46,866 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T19:46:46,866 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-16T19:46:46,867 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T19:46:46,867 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T19:46:46,867 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-16T19:46:46,867 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-16T19:46:46,867 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T19:46:46,867 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-16T19:46:46,874 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 669 msec 2024-12-16T19:46:47,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-16T19:46:47,312 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 110 
completed 2024-12-16T19:46:47,332 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$2(2219): Client=jenkins//172.17.0.2 merge regions [e659069ce8fd38dd0b5ece8b797d8834, 8a1a24737a4dc7838eb30fb713491018] 2024-12-16T19:46:47,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[e659069ce8fd38dd0b5ece8b797d8834, 8a1a24737a4dc7838eb30fb713491018], force=true 2024-12-16T19:46:47,341 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[e659069ce8fd38dd0b5ece8b797d8834, 8a1a24737a4dc7838eb30fb713491018], force=true 2024-12-16T19:46:47,341 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[e659069ce8fd38dd0b5ece8b797d8834, 8a1a24737a4dc7838eb30fb713491018], force=true 2024-12-16T19:46:47,341 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[e659069ce8fd38dd0b5ece8b797d8834, 8a1a24737a4dc7838eb30fb713491018], force=true 2024-12-16T19:46:47,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-16T19:46:47,361 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e659069ce8fd38dd0b5ece8b797d8834, UNASSIGN}, {pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8a1a24737a4dc7838eb30fb713491018, UNASSIGN}] 2024-12-16T19:46:47,364 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e659069ce8fd38dd0b5ece8b797d8834, UNASSIGN 2024-12-16T19:46:47,365 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8a1a24737a4dc7838eb30fb713491018, UNASSIGN 2024-12-16T19:46:47,366 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=e659069ce8fd38dd0b5ece8b797d8834, regionState=CLOSING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:46:47,367 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=8a1a24737a4dc7838eb30fb713491018, regionState=CLOSING, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:46:47,369 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-16T19:46:47,369 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized 
subprocedures=[{pid=118, ppid=116, state=RUNNABLE; CloseRegionProcedure e659069ce8fd38dd0b5ece8b797d8834, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:46:47,376 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-16T19:46:47,377 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=117, state=RUNNABLE; CloseRegionProcedure 8a1a24737a4dc7838eb30fb713491018, server=7a6e4bf70377,45217,1734378236605}] 2024-12-16T19:46:47,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-16T19:46:47,523 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T19:46:47,527 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:46:47,528 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(124): Close e659069ce8fd38dd0b5ece8b797d8834 2024-12-16T19:46:47,528 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-16T19:46:47,528 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1681): Closing e659069ce8fd38dd0b5ece8b797d8834, disabling compactions & flushes 2024-12-16T19:46:47,528 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:46:47,528 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834. 2024-12-16T19:46:47,528 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834. 2024-12-16T19:46:47,528 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834. after waiting 0 ms 2024-12-16T19:46:47,528 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834. 
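The HMaster entry just above records the client asking to merge [e659069ce8fd38dd0b5ece8b797d8834, 8a1a24737a4dc7838eb30fb713491018] with force=true, which becomes MergeTableRegionsProcedure pid=115 and the UNASSIGN/CloseRegionProcedure subprocedures running here. A hedged client-side sketch of the same request, assuming an open Connection:

```java
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionInfo;

public class MergeRegionsSketch {
  static void mergeBothRegions(Connection conn) throws Exception {
    TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (Admin admin = conn.getAdmin()) {
      // The table was created with exactly two regions, so merge them both.
      List<RegionInfo> regions = admin.getRegions(name);
      byte[][] encodedNames = new byte[][] {
          regions.get(0).getEncodedNameAsBytes(),
          regions.get(1).getEncodedNameAsBytes()
      };
      // force=true also permits merging non-adjacent regions; here the two
      // regions are adjacent, and the flag matches what the master logged.
      admin.mergeRegionsAsync(encodedNames, true).get();
    }
  }
}
```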
2024-12-16T19:46:47,528 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(2837): Flushing e659069ce8fd38dd0b5ece8b797d8834 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-16T19:46:47,528 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(124): Close 8a1a24737a4dc7838eb30fb713491018 2024-12-16T19:46:47,528 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-16T19:46:47,529 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1681): Closing 8a1a24737a4dc7838eb30fb713491018, disabling compactions & flushes 2024-12-16T19:46:47,529 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018. 2024-12-16T19:46:47,529 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018. 2024-12-16T19:46:47,529 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018. after waiting 0 ms 2024-12-16T19:46:47,529 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018. 
2024-12-16T19:46:47,529 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(2837): Flushing 8a1a24737a4dc7838eb30fb713491018 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-16T19:46:47,543 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834/.tmp/cf/1eabd562a4ac4aef89d25d6d52bfd7bc is 28, key is 1/cf:/1734378407314/Put/seqid=0 2024-12-16T19:46:47,548 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018/.tmp/cf/7604c463d4514877b41e600d5b897861 is 28, key is 2/cf:/1734378407318/Put/seqid=0 2024-12-16T19:46:47,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742144_1320 (size=4945) 2024-12-16T19:46:47,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742144_1320 (size=4945) 2024-12-16T19:46:47,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742144_1320 (size=4945) 2024-12-16T19:46:47,565 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834/.tmp/cf/1eabd562a4ac4aef89d25d6d52bfd7bc 2024-12-16T19:46:47,572 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834/.tmp/cf/1eabd562a4ac4aef89d25d6d52bfd7bc as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834/cf/1eabd562a4ac4aef89d25d6d52bfd7bc 2024-12-16T19:46:47,577 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834/cf/1eabd562a4ac4aef89d25d6d52bfd7bc, entries=1, sequenceid=5, filesize=4.8 K 2024-12-16T19:46:47,578 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for e659069ce8fd38dd0b5ece8b797d8834 in 50ms, sequenceid=5, compaction requested=false 2024-12-16T19:46:47,578 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] 
regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-16T19:46:47,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742145_1321 (size=4945) 2024-12-16T19:46:47,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742145_1321 (size=4945) 2024-12-16T19:46:47,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742145_1321 (size=4945) 2024-12-16T19:46:47,580 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018/.tmp/cf/7604c463d4514877b41e600d5b897861 2024-12-16T19:46:47,584 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-16T19:46:47,585 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:46:47,585 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834. 
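The flush above persists a single 24-byte cell per region (row '1' and row '2', family 'cf', empty qualifier, visible in the HFileWriterImpl key dumps) into ~4.8 K hfiles before the regions are closed for the merge. A hedged sketch of how such rows could be written and explicitly flushed from a client; the value bytes are made up, and in this run the flush actually happens implicitly when the regions close:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlushSketch {
  static void writeAndFlush(Connection conn) throws Exception {
    TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    byte[] cf = Bytes.toBytes("cf");
    try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
      // One row per region, empty qualifier, matching the "1/cf:" and "2/cf:"
      // keys in the log above. The values here are illustrative only.
      table.put(new Put(Bytes.toBytes("1")).addColumn(cf, Bytes.toBytes(""), Bytes.toBytes("v1")));
      table.put(new Put(Bytes.toBytes("2")).addColumn(cf, Bytes.toBytes(""), Bytes.toBytes("v2")));
      // Explicit memstore flush to hfiles for the whole table.
      admin.flush(name);
    }
  }
}
```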
2024-12-16T19:46:47,585 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1635): Region close journal for e659069ce8fd38dd0b5ece8b797d8834: 2024-12-16T19:46:47,586 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018/.tmp/cf/7604c463d4514877b41e600d5b897861 as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018/cf/7604c463d4514877b41e600d5b897861 2024-12-16T19:46:47,586 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(170): Closed e659069ce8fd38dd0b5ece8b797d8834 2024-12-16T19:46:47,587 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=e659069ce8fd38dd0b5ece8b797d8834, regionState=CLOSED 2024-12-16T19:46:47,590 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=118, resume processing ppid=116 2024-12-16T19:46:47,590 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, ppid=116, state=SUCCESS; CloseRegionProcedure e659069ce8fd38dd0b5ece8b797d8834, server=7a6e4bf70377,42741,1734378236851 in 220 msec 2024-12-16T19:46:47,591 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=e659069ce8fd38dd0b5ece8b797d8834, UNASSIGN in 229 msec 2024-12-16T19:46:47,591 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018/cf/7604c463d4514877b41e600d5b897861, entries=1, sequenceid=5, filesize=4.8 K 2024-12-16T19:46:47,592 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 8a1a24737a4dc7838eb30fb713491018 in 63ms, sequenceid=5, compaction requested=false 2024-12-16T19:46:47,595 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-16T19:46:47,596 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:46:47,596 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018. 
2024-12-16T19:46:47,596 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1635): Region close journal for 8a1a24737a4dc7838eb30fb713491018: 2024-12-16T19:46:47,597 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(170): Closed 8a1a24737a4dc7838eb30fb713491018 2024-12-16T19:46:47,598 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=8a1a24737a4dc7838eb30fb713491018, regionState=CLOSED 2024-12-16T19:46:47,601 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=117 2024-12-16T19:46:47,601 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=117, state=SUCCESS; CloseRegionProcedure 8a1a24737a4dc7838eb30fb713491018, server=7a6e4bf70377,45217,1734378236605 in 223 msec 2024-12-16T19:46:47,602 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=115 2024-12-16T19:46:47,602 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=8a1a24737a4dc7838eb30fb713491018, UNASSIGN in 240 msec 2024-12-16T19:46:47,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742146_1322 (size=84) 2024-12-16T19:46:47,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742146_1322 (size=84) 2024-12-16T19:46:47,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742146_1322 (size=84) 2024-12-16T19:46:47,614 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:46:47,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742147_1323 (size=20) 2024-12-16T19:46:47,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742147_1323 (size=20) 2024-12-16T19:46:47,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742147_1323 (size=20) 2024-12-16T19:46:47,625 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:46:47,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742148_1324 (size=21) 2024-12-16T19:46:47,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742148_1324 (size=21) 2024-12-16T19:46:47,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742148_1324 (size=21) 2024-12-16T19:46:47,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742149_1325 (size=84) 2024-12-16T19:46:47,636 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742149_1325 (size=84) 2024-12-16T19:46:47,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742149_1325 (size=84) 2024-12-16T19:46:47,637 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:46:47,647 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-16T19:46:47,650 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406196.e659069ce8fd38dd0b5ece8b797d8834.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-16T19:46:47,650 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1734378406196.8a1a24737a4dc7838eb30fb713491018.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-16T19:46:47,650 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-16T19:46:47,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-16T19:46:47,686 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4e505bd67acb2881a003b7511655184b, ASSIGN}] 2024-12-16T19:46:47,687 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4e505bd67acb2881a003b7511655184b, ASSIGN 2024-12-16T19:46:47,688 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4e505bd67acb2881a003b7511655184b, ASSIGN; state=MERGED, location=7a6e4bf70377,42741,1734378236851; forceNewPlan=false, retain=false 2024-12-16T19:46:47,838 INFO [7a6e4bf70377:45883 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-16T19:46:47,839 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=4e505bd67acb2881a003b7511655184b, regionState=OPENING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:46:47,840 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; OpenRegionProcedure 4e505bd67acb2881a003b7511655184b, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:46:47,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-16T19:46:47,992 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:46:47,995 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b. 2024-12-16T19:46:47,995 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7285): Opening region: {ENCODED => 4e505bd67acb2881a003b7511655184b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b.', STARTKEY => '', ENDKEY => ''} 2024-12-16T19:46:47,996 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b. service=AccessControlService 2024-12-16T19:46:47,996 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-16T19:46:47,996 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 4e505bd67acb2881a003b7511655184b 2024-12-16T19:46:47,996 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:46:47,996 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7327): checking encryption for 4e505bd67acb2881a003b7511655184b 2024-12-16T19:46:47,996 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7330): checking classloading for 4e505bd67acb2881a003b7511655184b 2024-12-16T19:46:47,998 INFO [StoreOpener-4e505bd67acb2881a003b7511655184b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4e505bd67acb2881a003b7511655184b 2024-12-16T19:46:47,999 INFO [StoreOpener-4e505bd67acb2881a003b7511655184b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4e505bd67acb2881a003b7511655184b columnFamilyName cf 2024-12-16T19:46:47,999 DEBUG [StoreOpener-4e505bd67acb2881a003b7511655184b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:46:48,015 DEBUG [StoreOpener-4e505bd67acb2881a003b7511655184b-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b/cf/1eabd562a4ac4aef89d25d6d52bfd7bc.e659069ce8fd38dd0b5ece8b797d8834->hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834/cf/1eabd562a4ac4aef89d25d6d52bfd7bc-top 2024-12-16T19:46:48,020 DEBUG [StoreOpener-4e505bd67acb2881a003b7511655184b-1 {}] regionserver.StoreEngine(277): loaded 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b/cf/7604c463d4514877b41e600d5b897861.8a1a24737a4dc7838eb30fb713491018->hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018/cf/7604c463d4514877b41e600d5b897861-top 2024-12-16T19:46:48,021 INFO [StoreOpener-4e505bd67acb2881a003b7511655184b-1 {}] regionserver.HStore(327): Store=4e505bd67acb2881a003b7511655184b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:46:48,022 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b 2024-12-16T19:46:48,023 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b 2024-12-16T19:46:48,025 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1085): writing seq id for 4e505bd67acb2881a003b7511655184b 2024-12-16T19:46:48,026 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1102): Opened 4e505bd67acb2881a003b7511655184b; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72389560, jitterRate=0.07868850231170654}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:46:48,027 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1001): Region open journal for 4e505bd67acb2881a003b7511655184b: 2024-12-16T19:46:48,027 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b., pid=121, masterSystemTime=1734378407992 2024-12-16T19:46:48,028 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b.,because compaction is disabled. 2024-12-16T19:46:48,029 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b. 2024-12-16T19:46:48,029 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b. 
2024-12-16T19:46:48,029 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=4e505bd67acb2881a003b7511655184b, regionState=OPEN, openSeqNum=9, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:46:48,032 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-16T19:46:48,032 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; OpenRegionProcedure 4e505bd67acb2881a003b7511655184b, server=7a6e4bf70377,42741,1734378236851 in 191 msec 2024-12-16T19:46:48,033 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=120, resume processing ppid=115 2024-12-16T19:46:48,033 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4e505bd67acb2881a003b7511655184b, ASSIGN in 346 msec 2024-12-16T19:46:48,034 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, state=SUCCESS; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[e659069ce8fd38dd0b5ece8b797d8834, 8a1a24737a4dc7838eb30fb713491018], force=true in 699 msec 2024-12-16T19:46:48,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-16T19:46:48,455 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 115 completed 2024-12-16T19:46:48,456 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-16T19:46:48,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378408456 (current time:1734378408456). 
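[Editor's illustrative sketch, not part of the captured log and not the test's actual code.] The entries around this point record three client-driven operations: the forced merge of the table's two regions (MergeTableRegionsProcedure, pid=115), the FLUSH-type snapshot request for snaptb0-testExportFileSystemStateWithMergeRegion-1, and, further below, the ExportSnapshot run. A minimal Java sketch of how a client could issue the same sequence through the public HBase Admin API is shown here, assuming an HBase 2.x client; the class name MergeSnapshotExportSketch and the export destination path are placeholders, the table and snapshot names are taken from the log, and the exact ExportSnapshot flags may differ slightly by version.

import java.util.List;
import java.util.concurrent.Future;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class MergeSnapshotExportSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table =
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {

      // Force-merge the table's two regions; this is what the log records as
      // MergeTableRegionsProcedure (pid=115) with its UNASSIGN/ASSIGN subprocedures.
      List<RegionInfo> regions = admin.getRegions(table);
      Future<Void> merge = admin.mergeRegionsAsync(
          new byte[][] {
              regions.get(0).getEncodedNameAsBytes(),
              regions.get(1).getEncodedNameAsBytes() },
          true /* forcible, matching force=true in the log */);
      merge.get();

      // Take a snapshot of the merged table (FLUSH type for an enabled table),
      // corresponding to the MasterRpcServices snapshot request entry above.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1", table);
    }

    // Export the snapshot to another HDFS location via the ExportSnapshot tool.
    // The destination below is a placeholder; the test derives its own
    // export-<timestamp> directory under its test-data root.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "-copy-to", "hdfs://localhost:38709/some/export/dir" });
    System.exit(rc);
  }
}

[End of sketch; the captured log continues below.]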
2024-12-16T19:46:48,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:46:48,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-16T19:46:48,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:46:48,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c92324a to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@52bf16b5 2024-12-16T19:46:48,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5de7b4e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:46:48,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:46:48,494 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51270, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:46:48,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c92324a to 127.0.0.1:62515 2024-12-16T19:46:48,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:46:48,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x33f3fa66 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@19d35813 2024-12-16T19:46:48,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7973f73c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:46:48,515 DEBUG [hconnection-0x139b59a4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:46:48,516 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51274, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:46:48,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:46:48,519 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51656, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:46:48,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close 
zookeeper connection 0x33f3fa66 to 127.0.0.1:62515 2024-12-16T19:46:48,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:46:48,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-16T19:46:48,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T19:46:48,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-16T19:46:48,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-16T19:46:48,523 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:46:48,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-16T19:46:48,525 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:46:48,527 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:46:48,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742150_1326 (size=216) 2024-12-16T19:46:48,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742150_1326 (size=216) 2024-12-16T19:46:48,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742150_1326 (size=216) 2024-12-16T19:46:48,535 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:46:48,535 INFO [PEWorker-4 
{}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 4e505bd67acb2881a003b7511655184b}] 2024-12-16T19:46:48,536 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 4e505bd67acb2881a003b7511655184b 2024-12-16T19:46:48,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-16T19:46:48,687 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:46:48,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42741 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-16T19:46:48,688 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b. 2024-12-16T19:46:48,688 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 4e505bd67acb2881a003b7511655184b: 2024-12-16T19:46:48,688 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-16T19:46:48,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:46:48,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:46:48,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b/cf/1eabd562a4ac4aef89d25d6d52bfd7bc.e659069ce8fd38dd0b5ece8b797d8834->hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834/cf/1eabd562a4ac4aef89d25d6d52bfd7bc-top, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b/cf/7604c463d4514877b41e600d5b897861.8a1a24737a4dc7838eb30fb713491018->hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018/cf/7604c463d4514877b41e600d5b897861-top] hfiles 2024-12-16T19:46:48,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b/cf/1eabd562a4ac4aef89d25d6d52bfd7bc.e659069ce8fd38dd0b5ece8b797d8834 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:46:48,690 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b/cf/7604c463d4514877b41e600d5b897861.8a1a24737a4dc7838eb30fb713491018 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:46:48,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742151_1327 (size=269) 2024-12-16T19:46:48,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742151_1327 (size=269) 2024-12-16T19:46:48,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742151_1327 (size=269) 2024-12-16T19:46:48,718 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b. 
2024-12-16T19:46:48,718 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-16T19:46:48,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-16T19:46:48,719 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 4e505bd67acb2881a003b7511655184b 2024-12-16T19:46:48,719 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 4e505bd67acb2881a003b7511655184b 2024-12-16T19:46:48,723 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-16T19:46:48,723 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:46:48,723 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; SnapshotRegionProcedure 4e505bd67acb2881a003b7511655184b in 185 msec 2024-12-16T19:46:48,724 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:46:48,725 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:46:48,725 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:46:48,726 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:46:48,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742152_1328 (size=670) 2024-12-16T19:46:48,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742152_1328 (size=670) 2024-12-16T19:46:48,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742152_1328 (size=670) 2024-12-16T19:46:48,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-16T19:46:49,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-16T19:46:49,154 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:46:49,162 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:46:49,163 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:46:49,164 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:46:49,164 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-16T19:46:49,167 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 643 msec 2024-12-16T19:46:49,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-16T19:46:49,628 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 122 completed 2024-12-16T19:46:49,628 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378409628 2024-12-16T19:46:49,629 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:38709, tgtDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378409628, rawTgtDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378409628, srcFsUri=hdfs://localhost:38709, srcDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:46:49,663 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:38709, 
inputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:46:49,663 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378409628, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378409628/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:46:49,665 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T19:46:49,672 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378409628/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:46:49,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742153_1329 (size=670) 2024-12-16T19:46:49,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742153_1329 (size=670) 2024-12-16T19:46:49,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742153_1329 (size=670) 2024-12-16T19:46:49,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742154_1330 (size=216) 2024-12-16T19:46:49,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742154_1330 (size=216) 2024-12-16T19:46:49,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742154_1330 (size=216) 2024-12-16T19:46:49,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:49,693 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:49,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:49,694 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:50,735 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-16140439024463086576.jar 2024-12-16T19:46:50,736 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:50,736 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:50,815 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-14005840636652317784.jar 2024-12-16T19:46:50,815 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:50,816 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:50,816 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:50,816 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:50,816 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:50,817 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T19:46:50,817 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T19:46:50,817 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T19:46:50,818 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, 
using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T19:46:50,818 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T19:46:50,818 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-16T19:46:50,818 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T19:46:50,819 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T19:46:50,819 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T19:46:50,819 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T19:46:50,819 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T19:46:50,820 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T19:46:50,820 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T19:46:50,820 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:46:50,821 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:46:50,821 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:46:50,821 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:46:50,821 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:46:50,822 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:46:50,822 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:46:50,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742155_1331 (size=127628) 2024-12-16T19:46:50,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742155_1331 (size=127628) 2024-12-16T19:46:50,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742155_1331 (size=127628) 2024-12-16T19:46:50,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742156_1332 (size=2172137) 2024-12-16T19:46:50,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742156_1332 (size=2172137) 2024-12-16T19:46:50,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742156_1332 (size=2172137) 2024-12-16T19:46:50,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742157_1333 (size=213228) 2024-12-16T19:46:50,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742157_1333 (size=213228) 2024-12-16T19:46:50,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742157_1333 (size=213228) 2024-12-16T19:46:50,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742158_1334 (size=1877034) 2024-12-16T19:46:50,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742158_1334 (size=1877034) 2024-12-16T19:46:50,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43893 is added to blk_1073742158_1334 (size=1877034) 2024-12-16T19:46:51,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742159_1335 (size=533455) 2024-12-16T19:46:51,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742159_1335 (size=533455) 2024-12-16T19:46:51,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742159_1335 (size=533455) 2024-12-16T19:46:51,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742160_1336 (size=7280644) 2024-12-16T19:46:51,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742160_1336 (size=7280644) 2024-12-16T19:46:51,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742160_1336 (size=7280644) 2024-12-16T19:46:51,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742161_1337 (size=6350922) 2024-12-16T19:46:51,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742161_1337 (size=6350922) 2024-12-16T19:46:51,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742161_1337 (size=6350922) 2024-12-16T19:46:51,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742162_1338 (size=4188619) 2024-12-16T19:46:51,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742162_1338 (size=4188619) 2024-12-16T19:46:51,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742162_1338 (size=4188619) 2024-12-16T19:46:51,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742163_1339 (size=20406) 2024-12-16T19:46:51,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742163_1339 (size=20406) 2024-12-16T19:46:51,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742163_1339 (size=20406) 2024-12-16T19:46:51,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742164_1340 (size=75495) 2024-12-16T19:46:51,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742164_1340 (size=75495) 2024-12-16T19:46:51,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742164_1340 (size=75495) 2024-12-16T19:46:51,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742165_1341 (size=45609) 2024-12-16T19:46:51,210 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742165_1341 (size=45609) 2024-12-16T19:46:51,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742165_1341 (size=45609) 2024-12-16T19:46:51,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742166_1342 (size=110084) 2024-12-16T19:46:51,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742166_1342 (size=110084) 2024-12-16T19:46:51,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742166_1342 (size=110084) 2024-12-16T19:46:51,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742167_1343 (size=1323991) 2024-12-16T19:46:51,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742167_1343 (size=1323991) 2024-12-16T19:46:51,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742167_1343 (size=1323991) 2024-12-16T19:46:51,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742168_1344 (size=23076) 2024-12-16T19:46:51,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742168_1344 (size=23076) 2024-12-16T19:46:51,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742168_1344 (size=23076) 2024-12-16T19:46:51,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742169_1345 (size=126803) 2024-12-16T19:46:51,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742169_1345 (size=126803) 2024-12-16T19:46:51,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742169_1345 (size=126803) 2024-12-16T19:46:51,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742170_1346 (size=322274) 2024-12-16T19:46:51,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742170_1346 (size=322274) 2024-12-16T19:46:51,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742170_1346 (size=322274) 2024-12-16T19:46:51,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742171_1347 (size=1832290) 2024-12-16T19:46:51,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742171_1347 (size=1832290) 2024-12-16T19:46:51,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742171_1347 (size=1832290) 2024-12-16T19:46:51,345 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742172_1348 (size=30081) 2024-12-16T19:46:51,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742172_1348 (size=30081) 2024-12-16T19:46:51,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742172_1348 (size=30081) 2024-12-16T19:46:51,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742173_1349 (size=53616) 2024-12-16T19:46:51,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742173_1349 (size=53616) 2024-12-16T19:46:51,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742173_1349 (size=53616) 2024-12-16T19:46:51,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742174_1350 (size=29229) 2024-12-16T19:46:51,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742174_1350 (size=29229) 2024-12-16T19:46:51,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742174_1350 (size=29229) 2024-12-16T19:46:51,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742175_1351 (size=169089) 2024-12-16T19:46:51,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742175_1351 (size=169089) 2024-12-16T19:46:51,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742175_1351 (size=169089) 2024-12-16T19:46:51,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742176_1352 (size=5175431) 2024-12-16T19:46:51,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742176_1352 (size=5175431) 2024-12-16T19:46:51,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742176_1352 (size=5175431) 2024-12-16T19:46:51,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742177_1353 (size=136454) 2024-12-16T19:46:51,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742177_1353 (size=136454) 2024-12-16T19:46:51,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742177_1353 (size=136454) 2024-12-16T19:46:51,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742178_1354 (size=907467) 2024-12-16T19:46:51,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742178_1354 (size=907467) 2024-12-16T19:46:51,500 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742178_1354 (size=907467) 2024-12-16T19:46:51,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742179_1355 (size=3317408) 2024-12-16T19:46:51,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742179_1355 (size=3317408) 2024-12-16T19:46:51,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742179_1355 (size=3317408) 2024-12-16T19:46:51,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742180_1356 (size=451756) 2024-12-16T19:46:51,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742180_1356 (size=451756) 2024-12-16T19:46:51,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742180_1356 (size=451756) 2024-12-16T19:46:51,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742181_1357 (size=503880) 2024-12-16T19:46:51,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742181_1357 (size=503880) 2024-12-16T19:46:51,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742181_1357 (size=503880) 2024-12-16T19:46:51,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742182_1358 (size=4695811) 2024-12-16T19:46:51,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742182_1358 (size=4695811) 2024-12-16T19:46:51,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742182_1358 (size=4695811) 2024-12-16T19:46:51,614 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
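
[editorial note] The JobResourceUploader warning just above ("No job jar file set. User classes may not be found.") appears because the export MapReduce job is submitted from inside the test JVM without a job jar; that is harmless here since the classes are already on the MiniMRCluster classpath. In standalone client code the warning is normally avoided by pointing the job at its jar, as the message itself suggests. A minimal sketch, assuming a hypothetical driver class ExportDriver (the class name and jar path are illustrative only):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class ExportDriver {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            Job job = Job.getInstance(conf, "export-job");
            // Derive the job jar from a class that lives inside it ...
            job.setJarByClass(ExportDriver.class);
            // ... or set an explicit jar path, the alternative named in the warning:
            // job.setJar("/path/to/export-job.jar");
        }
    }

Either call is enough to silence the warning; setJarByClass is the more common choice because it keeps the jar location out of the code.
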
2024-12-16T19:46:51,618 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-16T19:46:51,620 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=9.7 K 2024-12-16T19:46:51,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742183_1359 (size=378) 2024-12-16T19:46:51,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742183_1359 (size=378) 2024-12-16T19:46:51,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742183_1359 (size=378) 2024-12-16T19:46:51,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742184_1360 (size=15) 2024-12-16T19:46:51,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742184_1360 (size=15) 2024-12-16T19:46:51,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742184_1360 (size=15) 2024-12-16T19:46:51,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742185_1361 (size=304940) 2024-12-16T19:46:51,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742185_1361 (size=304940) 2024-12-16T19:46:51,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742185_1361 (size=304940) 2024-12-16T19:46:51,696 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T19:46:51,696 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T19:46:52,092 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0006_000001 (auth:SIMPLE) from 127.0.0.1:34392 2024-12-16T19:46:53,086 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T19:46:54,256 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
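
[editorial note] The two AbstractLeafQueue warnings above mean the CapacityScheduler queue used by the MiniMRCluster has maximum-am-resource-percent set so low that not even one ApplicationMaster would fit, so YARN skips enforcement and lets the job start anyway. On a real cluster the usual remedy is to raise that limit in capacity-scheduler.xml; in a mini-cluster test it can be passed through the Configuration handed to the cluster. A minimal sketch, assuming the standard CapacityScheduler property keys (the queue path "root.default" and the 0.5 value are illustrative assumptions):

    import org.apache.hadoop.conf.Configuration;

    public class RaiseAmShare {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Cluster-wide cap on the fraction of queue resources ApplicationMasters may use.
            conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
            // Per-queue override; "root.default" is an assumed queue path for illustration.
            conf.setFloat("yarn.scheduler.capacity.root.default.maximum-am-resource-percent", 0.5f);
            // This conf would then be passed to the mini cluster / job client being started.
            System.out.println(conf.get("yarn.scheduler.capacity.maximum-am-resource-percent"));
        }
    }
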
2024-12-16T19:46:56,039 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:46:56,039 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-16T19:46:57,192 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0006_000001 (auth:SIMPLE) from 127.0.0.1:39228 2024-12-16T19:46:57,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742186_1362 (size=350614) 2024-12-16T19:46:57,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742186_1362 (size=350614) 2024-12-16T19:46:57,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742186_1362 (size=350614) 2024-12-16T19:46:59,404 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0006_000001 (auth:SIMPLE) from 127.0.0.1:56644 2024-12-16T19:47:01,890 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 84a3ec9f16b896a68170287625bfe495 changed from -1.0 to 0.0, refreshing cache 2024-12-16T19:47:01,891 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region b363d90a34b23056d96e4dd25b341ff7 changed from -1.0 to 0.0, refreshing cache 2024-12-16T19:47:02,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742187_1363 (size=4945) 2024-12-16T19:47:02,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742187_1363 (size=4945) 2024-12-16T19:47:02,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742187_1363 (size=4945) 2024-12-16T19:47:02,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742188_1364 (size=4945) 2024-12-16T19:47:02,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742188_1364 (size=4945) 2024-12-16T19:47:02,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742188_1364 (size=4945) 2024-12-16T19:47:02,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742189_1365 (size=17474) 2024-12-16T19:47:02,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742189_1365 (size=17474) 2024-12-16T19:47:02,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742189_1365 (size=17474) 2024-12-16T19:47:02,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742190_1366 
(size=482) 2024-12-16T19:47:02,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742190_1366 (size=482) 2024-12-16T19:47:02,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742190_1366 (size=482) 2024-12-16T19:47:02,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742191_1367 (size=17474) 2024-12-16T19:47:02,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742191_1367 (size=17474) 2024-12-16T19:47:02,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742191_1367 (size=17474) 2024-12-16T19:47:02,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742192_1368 (size=350614) 2024-12-16T19:47:02,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742192_1368 (size=350614) 2024-12-16T19:47:02,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742192_1368 (size=350614) 2024-12-16T19:47:02,768 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_3/usercache/jenkins/appcache/application_1734378249643_0006/container_1734378249643_0006_01_000002/launch_container.sh] 2024-12-16T19:47:02,768 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_3/usercache/jenkins/appcache/application_1734378249643_0006/container_1734378249643_0006_01_000002/container_tokens] 2024-12-16T19:47:02,768 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_3/usercache/jenkins/appcache/application_1734378249643_0006/container_1734378249643_0006_01_000002/sysfs] 2024-12-16T19:47:02,774 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0006_000001 (auth:SIMPLE) from 127.0.0.1:56648 2024-12-16T19:47:04,012 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-16T19:47:04,013 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
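
[editorial note] The Time-limited test entries above trace org.apache.hadoop.hbase.snapshot.ExportSnapshot loading the hfile list for snaptb0-testExportFileSystemStateWithMergeRegion-1, planning a single export split (~9.7 K), running the copy as a MapReduce job on the MiniMRCluster, and then finalizing and verifying the export. Outside the test harness the same tool can be driven programmatically through ToolRunner; a minimal sketch, assuming ExportSnapshot's usual long options (--snapshot, --copy-to) and a placeholder destination URI, both of which should be checked against the HBase version in use:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotDriver {
        public static void main(String[] args) throws Exception {
            // Copy an existing snapshot to another filesystem; the data copy runs as a MR job.
            int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
                new String[] {
                    "--snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
                    "--copy-to", "hdfs://namenode:8020/backup"  // destination is a placeholder
                });
            System.exit(rc);
        }
    }

The exported layout mirrors what the log verifies next: a .hbase-snapshot/<name> directory at the destination containing .snapshotinfo and data.manifest plus the referenced store files.
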
2024-12-16T19:47:04,019 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,019 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-16T19:47:04,020 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-16T19:47:04,020 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,020 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-16T19:47:04,020 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-16T19:47:04,020 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378409628/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378409628/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,021 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378409628/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-16T19:47:04,021 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378409628/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-16T19:47:04,027 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,028 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-16T19:47:04,031 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378424030"}]},"ts":"1734378424030"} 2024-12-16T19:47:04,032 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-16T19:47:04,073 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-16T19:47:04,074 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-16T19:47:04,076 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4e505bd67acb2881a003b7511655184b, UNASSIGN}] 2024-12-16T19:47:04,076 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4e505bd67acb2881a003b7511655184b, UNASSIGN 2024-12-16T19:47:04,077 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=4e505bd67acb2881a003b7511655184b, regionState=CLOSING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:47:04,078 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:47:04,078 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; CloseRegionProcedure 4e505bd67acb2881a003b7511655184b, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:47:04,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-16T19:47:04,229 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:47:04,230 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(124): Close 4e505bd67acb2881a003b7511655184b 2024-12-16T19:47:04,230 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:47:04,230 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1681): Closing 4e505bd67acb2881a003b7511655184b, disabling compactions & flushes 2024-12-16T19:47:04,230 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b. 2024-12-16T19:47:04,230 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b. 
2024-12-16T19:47:04,230 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b. after waiting 0 ms 2024-12-16T19:47:04,230 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b. 2024-12-16T19:47:04,235 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-16T19:47:04,236 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:47:04,236 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b. 2024-12-16T19:47:04,236 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1635): Region close journal for 4e505bd67acb2881a003b7511655184b: 2024-12-16T19:47:04,238 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(170): Closed 4e505bd67acb2881a003b7511655184b 2024-12-16T19:47:04,238 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=4e505bd67acb2881a003b7511655184b, regionState=CLOSED 2024-12-16T19:47:04,240 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-16T19:47:04,241 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; CloseRegionProcedure 4e505bd67acb2881a003b7511655184b, server=7a6e4bf70377,42741,1734378236851 in 161 msec 2024-12-16T19:47:04,241 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125 2024-12-16T19:47:04,241 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=4e505bd67acb2881a003b7511655184b, UNASSIGN in 165 msec 2024-12-16T19:47:04,243 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-16T19:47:04,243 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 168 msec 2024-12-16T19:47:04,243 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378424243"}]},"ts":"1734378424243"} 2024-12-16T19:47:04,245 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in 
hbase:meta 2024-12-16T19:47:04,254 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-16T19:47:04,256 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 227 msec 2024-12-16T19:47:04,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-16T19:47:04,333 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 124 completed 2024-12-16T19:47:04,334 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,335 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,336 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=128, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,338 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,339 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834 2024-12-16T19:47:04,339 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018 2024-12-16T19:47:04,339 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b 2024-12-16T19:47:04,342 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b/recovered.edits] 2024-12-16T19:47:04,342 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834/recovered.edits] 2024-12-16T19:47:04,342 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018/recovered.edits] 2024-12-16T19:47:04,346 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018/cf/7604c463d4514877b41e600d5b897861 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018/cf/7604c463d4514877b41e600d5b897861 2024-12-16T19:47:04,346 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834/cf/1eabd562a4ac4aef89d25d6d52bfd7bc to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834/cf/1eabd562a4ac4aef89d25d6d52bfd7bc 2024-12-16T19:47:04,346 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b/cf/1eabd562a4ac4aef89d25d6d52bfd7bc.e659069ce8fd38dd0b5ece8b797d8834 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b/cf/1eabd562a4ac4aef89d25d6d52bfd7bc.e659069ce8fd38dd0b5ece8b797d8834 2024-12-16T19:47:04,346 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b/cf/7604c463d4514877b41e600d5b897861.8a1a24737a4dc7838eb30fb713491018 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b/cf/7604c463d4514877b41e600d5b897861.8a1a24737a4dc7838eb30fb713491018 2024-12-16T19:47:04,349 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018/recovered.edits/8.seqid to 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018/recovered.edits/8.seqid 2024-12-16T19:47:04,349 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834/recovered.edits/8.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834/recovered.edits/8.seqid 2024-12-16T19:47:04,349 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b/recovered.edits/12.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b/recovered.edits/12.seqid 2024-12-16T19:47:04,350 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/8a1a24737a4dc7838eb30fb713491018 2024-12-16T19:47:04,350 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/e659069ce8fd38dd0b5ece8b797d8834 2024-12-16T19:47:04,350 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/4e505bd67acb2881a003b7511655184b 2024-12-16T19:47:04,350 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-16T19:47:04,352 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=128, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,354 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-16T19:47:04,356 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 
2024-12-16T19:47:04,356 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,357 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-16T19:47:04,358 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=128, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,358 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-12-16T19:47:04,358 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378424358"}]},"ts":"9223372036854775807"} 2024-12-16T19:47:04,360 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-16T19:47:04,360 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 4e505bd67acb2881a003b7511655184b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b.', STARTKEY => '', ENDKEY => ''}] 2024-12-16T19:47:04,360 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 
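
[editorial note] Procedures pid=124 through pid=128 above are the server-side half of a client disable-then-delete of testtb-testExportFileSystemStateWithMergeRegion-1: DisableTableProcedure closes its only region 4e505bd67acb2881a003b7511655184b, then DeleteTableProcedure archives the region's HFiles, removes its rows from hbase:meta, and clears its permissions entry. The client side is the ordinary Admin API; a minimal sketch, assuming a reachable cluster configuration on the classpath:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTable {
        public static void main(String[] args) throws Exception {
            TableName table =
                TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
            try (Connection conn =
                     ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // A table must be disabled before it can be deleted; each call blocks
                // until the corresponding master procedure (as logged above) completes.
                admin.disableTable(table);
                admin.deleteTable(table);
            }
        }
    }
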
2024-12-16T19:47:04,360 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734378424360"}]},"ts":"9223372036854775807"} 2024-12-16T19:47:04,362 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-16T19:47:04,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:04,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:04,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:04,365 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data null 2024-12-16T19:47:04,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:04,365 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data null 2024-12-16T19:47:04,365 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data null 2024-12-16T19:47:04,365 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-16T19:47:04,365 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-16T19:47:04,365 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-16T19:47:04,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-16T19:47:04,373 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:04,373 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data 
PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:04,374 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:04,374 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:04,374 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=128, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,375 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 40 msec 2024-12-16T19:47:04,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-16T19:47:04,468 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 128 completed 2024-12-16T19:47:04,469 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,470 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=129, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-16T19:47:04,475 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378424474"}]},"ts":"1734378424474"} 2024-12-16T19:47:04,476 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-16T19:47:04,517 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-16T19:47:04,518 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=130, ppid=129, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-16T19:47:04,521 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=b363d90a34b23056d96e4dd25b341ff7, UNASSIGN}, {pid=132, ppid=130, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=84a3ec9f16b896a68170287625bfe495, UNASSIGN}] 2024-12-16T19:47:04,523 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=132, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=84a3ec9f16b896a68170287625bfe495, UNASSIGN 2024-12-16T19:47:04,523 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=b363d90a34b23056d96e4dd25b341ff7, UNASSIGN 2024-12-16T19:47:04,524 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=84a3ec9f16b896a68170287625bfe495, regionState=CLOSING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:47:04,525 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=b363d90a34b23056d96e4dd25b341ff7, regionState=CLOSING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:04,526 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:47:04,526 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; CloseRegionProcedure 84a3ec9f16b896a68170287625bfe495, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:47:04,527 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:47:04,527 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=134, ppid=131, state=RUNNABLE; CloseRegionProcedure b363d90a34b23056d96e4dd25b341ff7, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:47:04,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-16T19:47:04,678 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:47:04,678 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:04,678 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(124): Close 84a3ec9f16b896a68170287625bfe495 2024-12-16T19:47:04,678 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(124): Close b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:47:04,678 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:47:04,678 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:47:04,679 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1681): Closing b363d90a34b23056d96e4dd25b341ff7, disabling compactions & flushes 2024-12-16T19:47:04,679 DEBUG 
[RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1681): Closing 84a3ec9f16b896a68170287625bfe495, disabling compactions & flushes 2024-12-16T19:47:04,679 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. 2024-12-16T19:47:04,679 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. 2024-12-16T19:47:04,679 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. 2024-12-16T19:47:04,679 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. 2024-12-16T19:47:04,679 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. after waiting 0 ms 2024-12-16T19:47:04,679 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. after waiting 0 ms 2024-12-16T19:47:04,679 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. 2024-12-16T19:47:04,679 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. 
2024-12-16T19:47:04,683 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:47:04,683 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:47:04,684 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:47:04,684 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:47:04,684 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495. 2024-12-16T19:47:04,684 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7. 2024-12-16T19:47:04,684 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1635): Region close journal for 84a3ec9f16b896a68170287625bfe495: 2024-12-16T19:47:04,684 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1635): Region close journal for b363d90a34b23056d96e4dd25b341ff7: 2024-12-16T19:47:04,685 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(170): Closed b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:47:04,685 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=b363d90a34b23056d96e4dd25b341ff7, regionState=CLOSED 2024-12-16T19:47:04,686 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(170): Closed 84a3ec9f16b896a68170287625bfe495 2024-12-16T19:47:04,687 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=84a3ec9f16b896a68170287625bfe495, regionState=CLOSED 2024-12-16T19:47:04,689 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=134, resume processing ppid=131 2024-12-16T19:47:04,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, ppid=131, state=SUCCESS; CloseRegionProcedure b363d90a34b23056d96e4dd25b341ff7, server=7a6e4bf70377,42861,1734378237024 in 160 msec 2024-12-16T19:47:04,691 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-16T19:47:04,691 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, 
region=b363d90a34b23056d96e4dd25b341ff7, UNASSIGN in 168 msec 2024-12-16T19:47:04,691 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; CloseRegionProcedure 84a3ec9f16b896a68170287625bfe495, server=7a6e4bf70377,42741,1734378236851 in 162 msec 2024-12-16T19:47:04,692 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=132, resume processing ppid=130 2024-12-16T19:47:04,692 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=84a3ec9f16b896a68170287625bfe495, UNASSIGN in 170 msec 2024-12-16T19:47:04,698 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=130, resume processing ppid=129 2024-12-16T19:47:04,698 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, ppid=129, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 176 msec 2024-12-16T19:47:04,699 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378424699"}]},"ts":"1734378424699"} 2024-12-16T19:47:04,700 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-16T19:47:04,712 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-16T19:47:04,714 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 243 msec 2024-12-16T19:47:04,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-16T19:47:04,777 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 129 completed 2024-12-16T19:47:04,778 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,779 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,779 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=135, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,780 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(527): No permissions found in 
hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,782 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:47:04,782 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495 2024-12-16T19:47:04,783 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7/recovered.edits] 2024-12-16T19:47:04,783 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495/recovered.edits] 2024-12-16T19:47:04,786 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7/cf/709d837400894cd48dec66c0564e28ce to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7/cf/709d837400894cd48dec66c0564e28ce 2024-12-16T19:47:04,786 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495/cf/da61126c3eee46e6b7755254d15c95db to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495/cf/da61126c3eee46e6b7755254d15c95db 2024-12-16T19:47:04,789 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495/recovered.edits/9.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495/recovered.edits/9.seqid 2024-12-16T19:47:04,789 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7/recovered.edits/9.seqid to 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7/recovered.edits/9.seqid 2024-12-16T19:47:04,790 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/84a3ec9f16b896a68170287625bfe495 2024-12-16T19:47:04,790 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithMergeRegion/b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:47:04,790 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-16T19:47:04,790 DEBUG [PEWorker-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-12-16T19:47:04,791 DEBUG [PEWorker-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf] 2024-12-16T19:47:04,794 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241216b3864d05bbb44974a9665f0b96f4ba33_b363d90a34b23056d96e4dd25b341ff7 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e20241216b3864d05bbb44974a9665f0b96f4ba33_b363d90a34b23056d96e4dd25b341ff7 2024-12-16T19:47:04,794 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241216aab8e837b9d94726916820b28f1b6d48_84a3ec9f16b896a68170287625bfe495 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241216aab8e837b9d94726916820b28f1b6d48_84a3ec9f16b896a68170287625bfe495 2024-12-16T19:47:04,795 DEBUG [PEWorker-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-12-16T19:47:04,796 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=135, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,800 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-16T19:47:04,800 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-16T19:47:04,800 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-16T19:47:04,802 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-16T19:47:04,803 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=135, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,803 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-12-16T19:47:04,803 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378424803"}]},"ts":"9223372036854775807"} 2024-12-16T19:47:04,803 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378424803"}]},"ts":"9223372036854775807"} 2024-12-16T19:47:04,804 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-16T19:47:04,804 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => b363d90a34b23056d96e4dd25b341ff7, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1734378402520.b363d90a34b23056d96e4dd25b341ff7.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 84a3ec9f16b896a68170287625bfe495, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1734378402520.84a3ec9f16b896a68170287625bfe495.', STARTKEY => '1', ENDKEY => ''}] 2024-12-16T19:47:04,805 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
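
The DISABLE and DELETE procedures recorded above are driven from the client side through the HBase Admin API (the log shows HBaseAdmin$TableFuture completing each operation). As a rough sketch only, assuming a standard Connection setup and not taken from the test's actual source, the corresponding client calls would look like this:

    // Illustrative sketch of the client calls behind the DISABLE/DELETE procedures above;
    // the connection setup is assumed, this is not the test's actual code.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Corresponds to DisableTableProcedure (pid=129): regions are unassigned
          // and the table state in hbase:meta moves to DISABLED.
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table);
          }
          // Corresponds to DeleteTableProcedure (pid=135): region and MOB directories
          // are moved under /archive and the rows are removed from hbase:meta.
          admin.deleteTable(table);
        }
      }
    }

Both calls block until the master reports the procedure as finished, which matches the repeated "Checking to see if procedure is done" entries in the log.
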
2024-12-16T19:47:04,805 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734378424805"}]},"ts":"9223372036854775807"} 2024-12-16T19:47:04,806 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-16T19:47:04,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:04,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:04,806 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:04,807 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data null 2024-12-16T19:47:04,807 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-16T19:47:04,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:04,807 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data null 2024-12-16T19:47:04,807 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-16T19:47:04,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-16T19:47:04,815 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=135, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,816 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 38 msec 2024-12-16T19:47:04,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-16T19:47:04,909 INFO 
[Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 135 completed 2024-12-16T19:47:04,922 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" 2024-12-16T19:47:04,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,925 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" 2024-12-16T19:47:04,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:04,928 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" 2024-12-16T19:47:04,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:04,952 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=800 (was 791) Potentially hanging thread: Thread-5096 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: process reaper (pid 70418) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:50168 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46763 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:49250 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1422999152) connection to localhost/127.0.0.1:46763 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: hconnection-0x736823cc-shared-pool-33 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client 
DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:40444 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-29 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-31 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-34 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-32 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1778832197_1 at /127.0.0.1:49218 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1422999152) connection to localhost/127.0.0.1:38489 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: hconnection-0x736823cc-shared-pool-30 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=820 (was 801) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=838 (was 1042), ProcessCount=14 (was 17), AvailableMemoryMB=1608 (was 2103) 2024-12-16T19:47:04,952 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=800 is superior to 500 2024-12-16T19:47:04,971 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=800, OpenFileDescriptor=820, MaxFileDescriptor=1048576, SystemLoadAverage=838, ProcessCount=14, AvailableMemoryMB=1607 2024-12-16T19:47:04,971 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=800 is superior to 500 2024-12-16T19:47:04,972 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T19:47:04,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-16T19:47:04,974 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T19:47:04,974 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 136 2024-12-16T19:47:04,974 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T19:47:04,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-16T19:47:04,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742193_1369 (size=443) 2024-12-16T19:47:04,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742193_1369 (size=443) 2024-12-16T19:47:04,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742193_1369 (size=443) 2024-12-16T19:47:04,983 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => cec837acf781e0117df35d27ea71b4bf, NAME => 'testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:47:04,983 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => c8fa4a83c7a3032f869b5bd5a3077741, NAME => 'testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:47:04,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742194_1370 (size=68) 2024-12-16T19:47:04,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742194_1370 (size=68) 2024-12-16T19:47:04,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742194_1370 (size=68) 2024-12-16T19:47:04,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742195_1371 (size=68) 2024-12-16T19:47:04,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742195_1371 (size=68) 2024-12-16T19:47:04,992 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:47:04,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742195_1371 (size=68) 2024-12-16T19:47:04,992 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing c8fa4a83c7a3032f869b5bd5a3077741, disabling compactions & flushes 2024-12-16T19:47:04,992 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. 2024-12-16T19:47:04,992 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. 2024-12-16T19:47:04,992 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. 
after waiting 0 ms 2024-12-16T19:47:04,992 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. 2024-12-16T19:47:04,992 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. 2024-12-16T19:47:04,992 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for c8fa4a83c7a3032f869b5bd5a3077741: 2024-12-16T19:47:04,993 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:47:04,993 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing cec837acf781e0117df35d27ea71b4bf, disabling compactions & flushes 2024-12-16T19:47:04,993 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. 2024-12-16T19:47:04,993 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. 2024-12-16T19:47:04,993 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. after waiting 0 ms 2024-12-16T19:47:04,993 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. 2024-12-16T19:47:04,993 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. 
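
The three "delete name: ..." requests and the subsequent create 'testtb-testExportExpiredSnapshot' call above amount to snapshot cleanup followed by creation of a MOB-enabled table pre-split at row key '1'. A minimal sketch of equivalent Admin calls, with the names copied from the log and the builder chain assumed rather than taken from the test, might be:

    // Hedged sketch of the snapshot cleanup and MOB table creation seen above.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class RecreateExpiredSnapshotTableSketch {
      static void run(Admin admin) throws Exception {
        // Matches the three MasterRpcServices "delete name: ..." entries above.
        admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion");
        admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion");
        admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1");

        // IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1' from the descriptor above.
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMobEnabled(true)
                .setMobThreshold(0L)
                .setMaxVersions(1)
                .build())
            .build();
        // Two regions, ('', '1') and ('1', ''), i.e. a single split key "1".
        admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
      }
    }

A MOB threshold of 0 pushes every cell of cf into MOB files, which is consistent with the earlier DeleteTableProcedure also archiving files under mobdir/ for the previous table.
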
2024-12-16T19:47:04,993 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for cec837acf781e0117df35d27ea71b4bf: 2024-12-16T19:47:04,994 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T19:47:04,994 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734378424994"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378424994"}]},"ts":"1734378424994"} 2024-12-16T19:47:04,994 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1734378424994"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378424994"}]},"ts":"1734378424994"} 2024-12-16T19:47:04,996 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T19:47:04,997 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T19:47:04,997 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378424997"}]},"ts":"1734378424997"} 2024-12-16T19:47:04,998 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-16T19:47:05,015 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {7a6e4bf70377=0} racks are {/default-rack=0} 2024-12-16T19:47:05,017 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T19:47:05,017 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T19:47:05,017 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T19:47:05,017 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T19:47:05,017 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T19:47:05,017 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T19:47:05,017 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T19:47:05,017 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=cec837acf781e0117df35d27ea71b4bf, ASSIGN}, {pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c8fa4a83c7a3032f869b5bd5a3077741, ASSIGN}] 2024-12-16T19:47:05,018 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure 
table=testtb-testExportExpiredSnapshot, region=c8fa4a83c7a3032f869b5bd5a3077741, ASSIGN 2024-12-16T19:47:05,019 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=cec837acf781e0117df35d27ea71b4bf, ASSIGN 2024-12-16T19:47:05,019 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=cec837acf781e0117df35d27ea71b4bf, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42741,1734378236851; forceNewPlan=false, retain=false 2024-12-16T19:47:05,019 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c8fa4a83c7a3032f869b5bd5a3077741, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42861,1734378237024; forceNewPlan=false, retain=false 2024-12-16T19:47:05,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-16T19:47:05,170 INFO [7a6e4bf70377:45883 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T19:47:05,170 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=cec837acf781e0117df35d27ea71b4bf, regionState=OPENING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:47:05,170 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=c8fa4a83c7a3032f869b5bd5a3077741, regionState=OPENING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:05,172 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=137, state=RUNNABLE; OpenRegionProcedure cec837acf781e0117df35d27ea71b4bf, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:47:05,173 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=140, ppid=138, state=RUNNABLE; OpenRegionProcedure c8fa4a83c7a3032f869b5bd5a3077741, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:47:05,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-16T19:47:05,325 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:47:05,326 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:05,329 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. 
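
Once the TransitRegionStateProcedure entries above have moved both new regions to OPENING on their target region servers, the resulting placement is visible to clients through the RegionLocator API. A small, assumed example (not part of the test) that prints each encoded region name and its server:

    // Hedged sketch: list where the regions assigned above ended up.
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    class RegionPlacementSketch {
      static void printAssignments(Connection conn) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
        try (RegionLocator locator = conn.getRegionLocator(table)) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // e.g. cec837acf781e0117df35d27ea71b4bf -> 7a6e4bf70377,42741,1734378236851
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }
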
2024-12-16T19:47:05,329 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7285): Opening region: {ENCODED => cec837acf781e0117df35d27ea71b4bf, NAME => 'testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T19:47:05,329 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. 2024-12-16T19:47:05,329 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7285): Opening region: {ENCODED => c8fa4a83c7a3032f869b5bd5a3077741, NAME => 'testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T19:47:05,329 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. service=AccessControlService 2024-12-16T19:47:05,330 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. service=AccessControlService 2024-12-16T19:47:05,330 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:47:05,330 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-16T19:47:05,330 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:05,330 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:05,330 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:47:05,330 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:47:05,330 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7327): checking encryption for cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:05,330 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7327): checking encryption for c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:05,330 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7330): checking classloading for cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:05,330 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7330): checking classloading for c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:05,332 INFO [StoreOpener-cec837acf781e0117df35d27ea71b4bf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:05,332 INFO [StoreOpener-c8fa4a83c7a3032f869b5bd5a3077741-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:05,334 INFO [StoreOpener-c8fa4a83c7a3032f869b5bd5a3077741-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
c8fa4a83c7a3032f869b5bd5a3077741 columnFamilyName cf 2024-12-16T19:47:05,334 INFO [StoreOpener-cec837acf781e0117df35d27ea71b4bf-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cec837acf781e0117df35d27ea71b4bf columnFamilyName cf 2024-12-16T19:47:05,335 DEBUG [StoreOpener-cec837acf781e0117df35d27ea71b4bf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:05,335 DEBUG [StoreOpener-c8fa4a83c7a3032f869b5bd5a3077741-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:05,336 INFO [StoreOpener-cec837acf781e0117df35d27ea71b4bf-1 {}] regionserver.HStore(327): Store=cec837acf781e0117df35d27ea71b4bf/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:47:05,336 INFO [StoreOpener-c8fa4a83c7a3032f869b5bd5a3077741-1 {}] regionserver.HStore(327): Store=c8fa4a83c7a3032f869b5bd5a3077741/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:47:05,337 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:05,337 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:05,338 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:05,338 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:05,340 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1085): writing seq id for c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:05,340 DEBUG 
[RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1085): writing seq id for cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:05,342 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:47:05,342 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:47:05,342 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1102): Opened c8fa4a83c7a3032f869b5bd5a3077741; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65974239, jitterRate=-0.016907230019569397}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:47:05,342 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1102): Opened cec837acf781e0117df35d27ea71b4bf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67085666, jitterRate=-3.4567713737487793E-4}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:47:05,343 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1001): Region open journal for c8fa4a83c7a3032f869b5bd5a3077741: 2024-12-16T19:47:05,343 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1001): Region open journal for cec837acf781e0117df35d27ea71b4bf: 2024-12-16T19:47:05,343 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf., pid=139, masterSystemTime=1734378425325 2024-12-16T19:47:05,343 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741., pid=140, masterSystemTime=1734378425326 2024-12-16T19:47:05,345 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. 2024-12-16T19:47:05,345 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. 
2024-12-16T19:47:05,345 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=c8fa4a83c7a3032f869b5bd5a3077741, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:05,345 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. 2024-12-16T19:47:05,345 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. 2024-12-16T19:47:05,346 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=cec837acf781e0117df35d27ea71b4bf, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:47:05,348 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=140, resume processing ppid=138 2024-12-16T19:47:05,348 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, ppid=138, state=SUCCESS; OpenRegionProcedure c8fa4a83c7a3032f869b5bd5a3077741, server=7a6e4bf70377,42861,1734378237024 in 173 msec 2024-12-16T19:47:05,348 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=137 2024-12-16T19:47:05,348 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=137, state=SUCCESS; OpenRegionProcedure cec837acf781e0117df35d27ea71b4bf, server=7a6e4bf70377,42741,1734378236851 in 175 msec 2024-12-16T19:47:05,348 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c8fa4a83c7a3032f869b5bd5a3077741, ASSIGN in 331 msec 2024-12-16T19:47:05,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-16T19:47:05,349 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=cec837acf781e0117df35d27ea71b4bf, ASSIGN in 331 msec 2024-12-16T19:47:05,349 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T19:47:05,350 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378425349"}]},"ts":"1734378425349"} 2024-12-16T19:47:05,351 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-16T19:47:05,357 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T19:47:05,358 DEBUG [PEWorker-3 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-16T19:47:05,360 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(611): Read acl: 
entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-16T19:47:05,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:05,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:05,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:05,386 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:05,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:05,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:05,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:05,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:05,400 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 427 msec 2024-12-16T19:47:05,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-16T19:47:05,578 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 136 completed 2024-12-16T19:47:05,581 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-16T19:47:05,581 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. 
2024-12-16T19:47:05,581 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:47:05,595 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-16T19:47:05,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378425595 (current time:1734378425595). 2024-12-16T19:47:05,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:47:05,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-16T19:47:05,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:47:05,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x25baf574 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@82b15d4 2024-12-16T19:47:05,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@386e2dc2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:47:05,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:05,609 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40866, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:05,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x25baf574 to 127.0.0.1:62515 2024-12-16T19:47:05,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:47:05,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x015c5b98 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51897bd2 2024-12-16T19:47:05,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18405c1c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:47:05,632 DEBUG [hconnection-0x13fa0fbb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:05,634 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40868, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:05,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:05,637 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40058, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:05,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x015c5b98 to 127.0.0.1:62515 2024-12-16T19:47:05,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:47:05,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-16T19:47:05,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T19:47:05,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=141, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-16T19:47:05,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141 2024-12-16T19:47:05,643 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:47:05,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-16T19:47:05,644 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:47:05,647 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:47:05,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742196_1372 (size=170) 2024-12-16T19:47:05,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742196_1372 (size=170) 2024-12-16T19:47:05,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742196_1372 (size=170) 
2024-12-16T19:47:05,657 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:47:05,657 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure cec837acf781e0117df35d27ea71b4bf}, {pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure c8fa4a83c7a3032f869b5bd5a3077741}] 2024-12-16T19:47:05,658 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:05,658 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:05,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-16T19:47:05,809 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:47:05,809 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:05,809 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42741 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-16T19:47:05,809 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=143 2024-12-16T19:47:05,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. 2024-12-16T19:47:05,809 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. 2024-12-16T19:47:05,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for c8fa4a83c7a3032f869b5bd5a3077741: 2024-12-16T19:47:05,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2538): Flush status journal for cec837acf781e0117df35d27ea71b4bf: 2024-12-16T19:47:05,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. for emptySnaptb0-testExportExpiredSnapshot completed. 
2024-12-16T19:47:05,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-16T19:47:05,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-16T19:47:05,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-16T19:47:05,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:47:05,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:47:05,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T19:47:05,810 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T19:47:05,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742197_1373 (size=71) 2024-12-16T19:47:05,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742198_1374 (size=71) 2024-12-16T19:47:05,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742197_1373 (size=71) 2024-12-16T19:47:05,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742198_1374 (size=71) 2024-12-16T19:47:05,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742197_1373 (size=71) 2024-12-16T19:47:05,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. 
2024-12-16T19:47:05,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742198_1374 (size=71) 2024-12-16T19:47:05,817 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-16T19:47:05,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-16T19:47:05,818 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:05,818 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:05,819 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=141, state=SUCCESS; SnapshotRegionProcedure c8fa4a83c7a3032f869b5bd5a3077741 in 161 msec 2024-12-16T19:47:05,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. 2024-12-16T19:47:05,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-16T19:47:05,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=142 2024-12-16T19:47:05,822 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:05,822 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:05,824 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=141 2024-12-16T19:47:05,824 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:47:05,824 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; SnapshotRegionProcedure cec837acf781e0117df35d27ea71b4bf in 165 msec 2024-12-16T19:47:05,824 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:47:05,825 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-16T19:47:05,825 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:47:05,825 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:05,826 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-16T19:47:05,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742199_1375 (size=63) 2024-12-16T19:47:05,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742199_1375 (size=63) 2024-12-16T19:47:05,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742199_1375 (size=63) 2024-12-16T19:47:05,835 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:47:05,835 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-16T19:47:05,836 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-16T19:47:05,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742200_1376 (size=653) 2024-12-16T19:47:05,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742200_1376 (size=653) 2024-12-16T19:47:05,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742200_1376 (size=653) 2024-12-16T19:47:05,850 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:47:05,854 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:47:05,855 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-16T19:47:05,856 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=141, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:47:05,857 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141 2024-12-16T19:47:05,858 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 217 msec 2024-12-16T19:47:05,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-16T19:47:05,946 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 141 completed 2024-12-16T19:47:05,949 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42741 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:47:05,950 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42861 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:47:05,954 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-16T19:47:05,954 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. 2024-12-16T19:47:05,954 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:47:05,975 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-16T19:47:05,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378425975 (current time:1734378425975). 
2024-12-16T19:47:05,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:47:05,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-16T19:47:05,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:47:05,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x706eb507 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@33aaeb66 2024-12-16T19:47:05,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bbdfcea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:47:05,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:05,999 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40870, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:06,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x706eb507 to 127.0.0.1:62515 2024-12-16T19:47:06,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:47:06,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x41421136 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@25d723a3 2024-12-16T19:47:06,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4169c15f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:47:06,017 DEBUG [hconnection-0xf377b7c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:06,018 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40882, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:06,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:06,020 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40066, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:06,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x41421136 to 127.0.0.1:62515 2024-12-16T19:47:06,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:47:06,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-16T19:47:06,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T19:47:06,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-16T19:47:06,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144 2024-12-16T19:47:06,024 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:47:06,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-16T19:47:06,025 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:47:06,027 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:47:06,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742201_1377 (size=165) 2024-12-16T19:47:06,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742201_1377 (size=165) 2024-12-16T19:47:06,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742201_1377 (size=165) 2024-12-16T19:47:06,034 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:47:06,034 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure cec837acf781e0117df35d27ea71b4bf}, {pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 
c8fa4a83c7a3032f869b5bd5a3077741}] 2024-12-16T19:47:06,035 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:06,035 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:06,039 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-16T19:47:06,039 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-16T19:47:06,040 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-16T19:47:06,040 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-16T19:47:06,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-16T19:47:06,186 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:06,186 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:47:06,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42741 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=145 2024-12-16T19:47:06,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=146 2024-12-16T19:47:06,187 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. 2024-12-16T19:47:06,187 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. 
2024-12-16T19:47:06,187 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2837): Flushing c8fa4a83c7a3032f869b5bd5a3077741 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-16T19:47:06,187 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing cec837acf781e0117df35d27ea71b4bf 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-16T19:47:06,208 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216eb818f5023e94d58afd4161810e95bf3_cec837acf781e0117df35d27ea71b4bf is 71, key is 06d5d0a0985097b34efc9e5b744fc2ad/cf:q/1734378425949/Put/seqid=0 2024-12-16T19:47:06,210 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241216219b062e7c5647e799a74f93184269cd_c8fa4a83c7a3032f869b5bd5a3077741 is 71, key is 13eda7a51998adbfd5a0e21b70ef9516/cf:q/1734378425950/Put/seqid=0 2024-12-16T19:47:06,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742202_1378 (size=5172) 2024-12-16T19:47:06,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742202_1378 (size=5172) 2024-12-16T19:47:06,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742202_1378 (size=5172) 2024-12-16T19:47:06,224 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:06,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742203_1379 (size=8101) 2024-12-16T19:47:06,230 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:06,231 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216eb818f5023e94d58afd4161810e95bf3_cec837acf781e0117df35d27ea71b4bf to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241216eb818f5023e94d58afd4161810e95bf3_cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:06,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742203_1379 (size=8101) 
2024-12-16T19:47:06,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742203_1379 (size=8101) 2024-12-16T19:47:06,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf/.tmp/cf/a723c1fdb0754e92960c1d7a957b85c5, store: [table=testtb-testExportExpiredSnapshot family=cf region=cec837acf781e0117df35d27ea71b4bf] 2024-12-16T19:47:06,233 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf/.tmp/cf/a723c1fdb0754e92960c1d7a957b85c5 is 209, key is 034a42903959705a4bb250b8049b511af/cf:q/1734378425949/Put/seqid=0 2024-12-16T19:47:06,239 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241216219b062e7c5647e799a74f93184269cd_c8fa4a83c7a3032f869b5bd5a3077741 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241216219b062e7c5647e799a74f93184269cd_c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:06,241 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741/.tmp/cf/d4c6b384956a45a9ab1d1bc913bca2c4, store: [table=testtb-testExportExpiredSnapshot family=cf region=c8fa4a83c7a3032f869b5bd5a3077741] 2024-12-16T19:47:06,241 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741/.tmp/cf/d4c6b384956a45a9ab1d1bc913bca2c4 is 209, key is 19673581afe2e5b0b412e40b51436eb5b/cf:q/1734378425950/Put/seqid=0 2024-12-16T19:47:06,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742204_1380 (size=6123) 2024-12-16T19:47:06,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742204_1380 (size=6123) 2024-12-16T19:47:06,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742204_1380 (size=6123) 2024-12-16T19:47:06,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742205_1381 (size=14792) 2024-12-16T19:47:06,270 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=145}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf/.tmp/cf/a723c1fdb0754e92960c1d7a957b85c5 2024-12-16T19:47:06,270 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741/.tmp/cf/d4c6b384956a45a9ab1d1bc913bca2c4 2024-12-16T19:47:06,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742205_1381 (size=14792) 2024-12-16T19:47:06,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742205_1381 (size=14792) 2024-12-16T19:47:06,286 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741/.tmp/cf/d4c6b384956a45a9ab1d1bc913bca2c4 as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741/cf/d4c6b384956a45a9ab1d1bc913bca2c4 2024-12-16T19:47:06,287 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf/.tmp/cf/a723c1fdb0754e92960c1d7a957b85c5 as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf/cf/a723c1fdb0754e92960c1d7a957b85c5 2024-12-16T19:47:06,300 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741/cf/d4c6b384956a45a9ab1d1bc913bca2c4, entries=46, sequenceid=6, filesize=14.4 K 2024-12-16T19:47:06,302 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf/cf/a723c1fdb0754e92960c1d7a957b85c5, entries=4, sequenceid=6, filesize=6.0 K 2024-12-16T19:47:06,302 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for c8fa4a83c7a3032f869b5bd5a3077741 in 115ms, sequenceid=6, compaction requested=false 2024-12-16T19:47:06,303 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-16T19:47:06,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2538): Flush status journal for c8fa4a83c7a3032f869b5bd5a3077741: 2024-12-16T19:47:06,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. for snaptb0-testExportExpiredSnapshot completed. 2024-12-16T19:47:06,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-16T19:47:06,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:47:06,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741/cf/d4c6b384956a45a9ab1d1bc913bca2c4] hfiles 2024-12-16T19:47:06,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741/cf/d4c6b384956a45a9ab1d1bc913bca2c4 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-16T19:47:06,304 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for cec837acf781e0117df35d27ea71b4bf in 117ms, sequenceid=6, compaction requested=false 2024-12-16T19:47:06,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for cec837acf781e0117df35d27ea71b4bf: 2024-12-16T19:47:06,304 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. for snaptb0-testExportExpiredSnapshot completed. 2024-12-16T19:47:06,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-16T19:47:06,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:47:06,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf/cf/a723c1fdb0754e92960c1d7a957b85c5] hfiles 2024-12-16T19:47:06,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf/cf/a723c1fdb0754e92960c1d7a957b85c5 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-16T19:47:06,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-16T19:47:06,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742206_1382 (size=110) 2024-12-16T19:47:06,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742206_1382 (size=110) 2024-12-16T19:47:06,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742206_1382 (size=110) 2024-12-16T19:47:06,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742207_1383 (size=110) 2024-12-16T19:47:06,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742207_1383 (size=110) 2024-12-16T19:47:06,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742207_1383 (size=110) 2024-12-16T19:47:06,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. 
2024-12-16T19:47:06,340 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-16T19:47:06,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-16T19:47:06,340 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:06,342 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:06,346 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; SnapshotRegionProcedure cec837acf781e0117df35d27ea71b4bf in 309 msec 2024-12-16T19:47:06,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-16T19:47:06,732 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. 2024-12-16T19:47:06,732 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=146 2024-12-16T19:47:06,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=146 2024-12-16T19:47:06,733 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:06,733 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:06,735 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=144 2024-12-16T19:47:06,735 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:47:06,736 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=144, state=SUCCESS; SnapshotRegionProcedure c8fa4a83c7a3032f869b5bd5a3077741 in 700 msec 2024-12-16T19:47:06,736 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:47:06,738 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
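The entries above trace the server side of the FLUSH-type snapshot procedure (pid=144): each SnapshotRegionProcedure flushes its region, records region-info and hfile references in the SnapshotManifest, and reports completion back to the master while the client keeps polling pid=144. For orientation, a minimal hedged sketch of how such a snapshot is requested through the public HBase Admin API; the snapshot and table names match the log, everything else (class name, connection setup) is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A FLUSH snapshot flushes each region's memstore first, which is what the
          // "Flushing ..." / "Finished flush of dataSize ..." entries above correspond to.
          admin.snapshot(new SnapshotDescription(
              "snaptb0-testExportExpiredSnapshot",
              TableName.valueOf("testtb-testExportExpiredSnapshot"),
              SnapshotType.FLUSH));
          // snapshot(...) blocks until the master-side SnapshotProcedure completes;
          // the repeated "Checking to see if procedure is done pid=144" entries are that polling.
        }
      }
    }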
2024-12-16T19:47:06,738 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:47:06,738 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:06,739 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241216219b062e7c5647e799a74f93184269cd_c8fa4a83c7a3032f869b5bd5a3077741, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241216eb818f5023e94d58afd4161810e95bf3_cec837acf781e0117df35d27ea71b4bf] hfiles 2024-12-16T19:47:06,739 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241216219b062e7c5647e799a74f93184269cd_c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:06,740 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241216eb818f5023e94d58afd4161810e95bf3_cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:06,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742208_1384 (size=294) 2024-12-16T19:47:06,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742208_1384 (size=294) 2024-12-16T19:47:06,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742208_1384 (size=294) 2024-12-16T19:47:06,757 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:47:06,757 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-16T19:47:06,758 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-16T19:47:06,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742209_1385 (size=963) 2024-12-16T19:47:06,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742209_1385 (size=963) 2024-12-16T19:47:06,765 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742209_1385 (size=963) 2024-12-16T19:47:06,768 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:47:06,773 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:47:06,773 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-16T19:47:06,774 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:47:06,775 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144 2024-12-16T19:47:06,783 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 753 msec 2024-12-16T19:47:07,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-16T19:47:07,129 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 144 completed 2024-12-16T19:47:07,130 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T19:47:07,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-16T19:47:07,132 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot 
execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T19:47:07,132 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 147 2024-12-16T19:47:07,133 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T19:47:07,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-16T19:47:07,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742210_1386 (size=436) 2024-12-16T19:47:07,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742210_1386 (size=436) 2024-12-16T19:47:07,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742210_1386 (size=436) 2024-12-16T19:47:07,142 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9607b295cff8a6b815204957546cba58, NAME => 'testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:47:07,142 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 34c062451cb346b64f29e9d2d97ae5fd, NAME => 'testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:47:07,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742211_1387 (size=61) 2024-12-16T19:47:07,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742211_1387 (size=61) 2024-12-16T19:47:07,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742211_1387 (size=61) 
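Here the master accepts a create-table request for 'testExportExpiredSnapshot' with a single MOB-enabled column family (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and writes the filesystem layout for two regions split at row key '1'. A hedged client-side sketch of an equivalent table definition; only the names, MOB settings, and split key are taken from the log, the helper itself is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class CreateMobTableSketch {
      static void createMobTable(Admin admin) throws IOException {
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                    .setMobEnabled(true)   // IS_MOB => 'true'
                    .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell takes the MOB path
                    .setMaxVersions(1)     // VERSIONS => '1'
                    .build());
        byte[][] splitKeys = { Bytes.toBytes("1") };   // yields regions ('', '1') and ('1', '')
        admin.createTable(table.build(), splitKeys);   // returns once the table is ENABLED
      }
    }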
2024-12-16T19:47:07,151 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:47:07,151 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing 9607b295cff8a6b815204957546cba58, disabling compactions & flushes 2024-12-16T19:47:07,151 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. 2024-12-16T19:47:07,151 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. 2024-12-16T19:47:07,151 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. after waiting 0 ms 2024-12-16T19:47:07,151 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. 2024-12-16T19:47:07,151 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. 2024-12-16T19:47:07,151 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9607b295cff8a6b815204957546cba58: 2024-12-16T19:47:07,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742212_1388 (size=61) 2024-12-16T19:47:07,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742212_1388 (size=61) 2024-12-16T19:47:07,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742212_1388 (size=61) 2024-12-16T19:47:07,189 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:47:07,189 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing 34c062451cb346b64f29e9d2d97ae5fd, disabling compactions & flushes 2024-12-16T19:47:07,189 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. 2024-12-16T19:47:07,189 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. 2024-12-16T19:47:07,189 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. 
after waiting 0 ms 2024-12-16T19:47:07,189 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. 2024-12-16T19:47:07,189 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. 2024-12-16T19:47:07,189 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for 34c062451cb346b64f29e9d2d97ae5fd: 2024-12-16T19:47:07,191 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T19:47:07,191 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1734378427191"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378427191"}]},"ts":"1734378427191"} 2024-12-16T19:47:07,191 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1734378427191"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378427191"}]},"ts":"1734378427191"} 2024-12-16T19:47:07,194 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T19:47:07,195 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T19:47:07,196 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378427195"}]},"ts":"1734378427195"} 2024-12-16T19:47:07,198 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-16T19:47:07,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-16T19:47:07,240 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {7a6e4bf70377=0} racks are {/default-rack=0} 2024-12-16T19:47:07,241 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T19:47:07,241 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T19:47:07,241 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T19:47:07,241 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T19:47:07,241 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T19:47:07,241 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T19:47:07,241 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T19:47:07,241 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=9607b295cff8a6b815204957546cba58, ASSIGN}, {pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=34c062451cb346b64f29e9d2d97ae5fd, ASSIGN}] 2024-12-16T19:47:07,242 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=34c062451cb346b64f29e9d2d97ae5fd, ASSIGN 2024-12-16T19:47:07,242 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=9607b295cff8a6b815204957546cba58, ASSIGN 2024-12-16T19:47:07,242 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=9607b295cff8a6b815204957546cba58, ASSIGN; state=OFFLINE, location=7a6e4bf70377,45217,1734378236605; forceNewPlan=false, retain=false 2024-12-16T19:47:07,242 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=34c062451cb346b64f29e9d2d97ae5fd, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42861,1734378237024; forceNewPlan=false, retain=false 2024-12-16T19:47:07,393 INFO [7a6e4bf70377:45883 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T19:47:07,393 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=9607b295cff8a6b815204957546cba58, regionState=OPENING, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:47:07,393 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=34c062451cb346b64f29e9d2d97ae5fd, regionState=OPENING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:07,395 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=148, state=RUNNABLE; OpenRegionProcedure 9607b295cff8a6b815204957546cba58, server=7a6e4bf70377,45217,1734378236605}] 2024-12-16T19:47:07,396 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=149, state=RUNNABLE; OpenRegionProcedure 34c062451cb346b64f29e9d2d97ae5fd, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:47:07,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-16T19:47:07,547 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:47:07,548 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:07,553 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. 
2024-12-16T19:47:07,553 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7285): Opening region: {ENCODED => 9607b295cff8a6b815204957546cba58, NAME => 'testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T19:47:07,553 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. 2024-12-16T19:47:07,554 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7285): Opening region: {ENCODED => 34c062451cb346b64f29e9d2d97ae5fd, NAME => 'testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T19:47:07,554 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. service=AccessControlService 2024-12-16T19:47:07,554 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. service=AccessControlService 2024-12-16T19:47:07,554 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:47:07,554 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-16T19:47:07,554 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 9607b295cff8a6b815204957546cba58 2024-12-16T19:47:07,554 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:47:07,554 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 34c062451cb346b64f29e9d2d97ae5fd 2024-12-16T19:47:07,554 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:47:07,554 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7327): checking encryption for 9607b295cff8a6b815204957546cba58 2024-12-16T19:47:07,554 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7330): checking classloading for 9607b295cff8a6b815204957546cba58 2024-12-16T19:47:07,554 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7327): checking encryption for 34c062451cb346b64f29e9d2d97ae5fd 2024-12-16T19:47:07,555 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7330): checking classloading for 34c062451cb346b64f29e9d2d97ae5fd 2024-12-16T19:47:07,556 INFO [StoreOpener-9607b295cff8a6b815204957546cba58-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9607b295cff8a6b815204957546cba58 2024-12-16T19:47:07,556 INFO [StoreOpener-34c062451cb346b64f29e9d2d97ae5fd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 34c062451cb346b64f29e9d2d97ae5fd 2024-12-16T19:47:07,558 INFO [StoreOpener-34c062451cb346b64f29e9d2d97ae5fd-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 34c062451cb346b64f29e9d2d97ae5fd 
columnFamilyName cf 2024-12-16T19:47:07,558 INFO [StoreOpener-9607b295cff8a6b815204957546cba58-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9607b295cff8a6b815204957546cba58 columnFamilyName cf 2024-12-16T19:47:07,559 DEBUG [StoreOpener-34c062451cb346b64f29e9d2d97ae5fd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:07,559 DEBUG [StoreOpener-9607b295cff8a6b815204957546cba58-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:07,559 INFO [StoreOpener-34c062451cb346b64f29e9d2d97ae5fd-1 {}] regionserver.HStore(327): Store=34c062451cb346b64f29e9d2d97ae5fd/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:47:07,559 INFO [StoreOpener-9607b295cff8a6b815204957546cba58-1 {}] regionserver.HStore(327): Store=9607b295cff8a6b815204957546cba58/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:47:07,560 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/9607b295cff8a6b815204957546cba58 2024-12-16T19:47:07,560 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/34c062451cb346b64f29e9d2d97ae5fd 2024-12-16T19:47:07,560 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/9607b295cff8a6b815204957546cba58 2024-12-16T19:47:07,560 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/34c062451cb346b64f29e9d2d97ae5fd 2024-12-16T19:47:07,562 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1085): writing seq id for 34c062451cb346b64f29e9d2d97ae5fd 2024-12-16T19:47:07,562 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] 
regionserver.HRegion(1085): writing seq id for 9607b295cff8a6b815204957546cba58 2024-12-16T19:47:07,564 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/34c062451cb346b64f29e9d2d97ae5fd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:47:07,564 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/9607b295cff8a6b815204957546cba58/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:47:07,564 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1102): Opened 9607b295cff8a6b815204957546cba58; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68784736, jitterRate=0.02497243881225586}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:47:07,564 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1102): Opened 34c062451cb346b64f29e9d2d97ae5fd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64840613, jitterRate=-0.03379957377910614}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:47:07,565 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1001): Region open journal for 9607b295cff8a6b815204957546cba58: 2024-12-16T19:47:07,565 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1001): Region open journal for 34c062451cb346b64f29e9d2d97ae5fd: 2024-12-16T19:47:07,566 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58., pid=150, masterSystemTime=1734378427546 2024-12-16T19:47:07,566 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd., pid=151, masterSystemTime=1734378427548 2024-12-16T19:47:07,568 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. 2024-12-16T19:47:07,568 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. 
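Both regions of testExportExpiredSnapshot are now open, one on the server at port 45217 and one at 42861. A hedged sketch of how a client can confirm the assignment before writing data; the table name comes from the log, the helper is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    class RegionAssignmentSketch {
      static void printAssignments(Connection conn) throws IOException {
        try (RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testExportExpiredSnapshot"))) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            // Expect the two regions created above, e.g. one on ...,45217,... and one on ...,42861,...
            System.out.println(loc.getRegion().getRegionNameAsString()
                + " -> " + loc.getServerName());
          }
        }
      }
    }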
2024-12-16T19:47:07,568 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=9607b295cff8a6b815204957546cba58, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:47:07,568 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. 2024-12-16T19:47:07,568 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. 2024-12-16T19:47:07,568 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=34c062451cb346b64f29e9d2d97ae5fd, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:07,570 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=148 2024-12-16T19:47:07,571 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=149 2024-12-16T19:47:07,571 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=148, state=SUCCESS; OpenRegionProcedure 9607b295cff8a6b815204957546cba58, server=7a6e4bf70377,45217,1734378236605 in 174 msec 2024-12-16T19:47:07,571 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=149, state=SUCCESS; OpenRegionProcedure 34c062451cb346b64f29e9d2d97ae5fd, server=7a6e4bf70377,42861,1734378237024 in 175 msec 2024-12-16T19:47:07,571 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=9607b295cff8a6b815204957546cba58, ASSIGN in 329 msec 2024-12-16T19:47:07,572 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=147 2024-12-16T19:47:07,572 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=34c062451cb346b64f29e9d2d97ae5fd, ASSIGN in 330 msec 2024-12-16T19:47:07,572 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T19:47:07,573 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378427572"}]},"ts":"1734378427572"} 2024-12-16T19:47:07,574 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-16T19:47:07,615 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T19:47:07,615 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-16T19:47:07,617 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 
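The AccessController coprocessor persists the table owner's permissions (jenkins: RWXCA) during CREATE_TABLE_POST_OPERATION, and the ZooKeeper watcher entries that follow show every region server refreshing its permission cache from /hbase/acl. In this test the ACL is written automatically for the table owner; an explicit grant of the same rights from a client would look roughly like the sketch below (it uses the public AccessControlClient helper; treat the call as illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    class GrantTableAclSketch {
      static void grantOwnerStyleAcl(Connection conn) throws Throwable {
        // RWXCA as recorded in the log: READ, WRITE, EXEC, CREATE, ADMIN on the whole table.
        AccessControlClient.grant(conn,
            TableName.valueOf("testExportExpiredSnapshot"),
            "jenkins",
            null,   // all column families
            null,   // all qualifiers
            Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
            Permission.Action.CREATE, Permission.Action.ADMIN);
      }
    }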
2024-12-16T19:47:07,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:07,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:07,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:07,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:07,657 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:07,657 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:07,657 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:07,657 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:07,658 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:07,658 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:07,658 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:07,658 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:07,658 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, state=SUCCESS; CreateTableProcedure table=testExportExpiredSnapshot in 527 msec 2024-12-16T19:47:07,736 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-16T19:47:07,736 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportExpiredSnapshot, procId: 147 completed 2024-12-16T19:47:07,738 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportExpiredSnapshot 2024-12-16T19:47:07,738 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. 2024-12-16T19:47:07,738 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:47:07,751 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:47:07,752 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42861 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:47:07,755 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportExpiredSnapshot 2024-12-16T19:47:07,755 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. 2024-12-16T19:47:07,755 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:47:07,763 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-16T19:47:07,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-16T19:47:07,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:47:07,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4f549c70 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@52e0c6b0 2024-12-16T19:47:07,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22c4e865, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:47:07,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:07,775 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40896, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:07,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] 
zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4f549c70 to 127.0.0.1:62515 2024-12-16T19:47:07,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:47:07,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4e618bfc to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1a6001c6 2024-12-16T19:47:07,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fe2452f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:47:07,793 DEBUG [hconnection-0x5332c678-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:07,794 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40900, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:07,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:07,797 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40076, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:07,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4e618bfc to 127.0.0.1:62515 2024-12-16T19:47:07,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:47:07,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-16T19:47:07,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
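This second snapshot request differs from the earlier one in that it carries ttl=10, meaning the snapshot is intended to expire ten seconds after creation, which is what the rest of the test relies on. A hedged sketch of issuing a TTL-bearing snapshot from the Java client; the snapshot-properties constructor and the "TTL" key are assumptions about the client version in use and may differ in older releases:

    import java.util.Collections;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    class TtlSnapshotSketch {
      static void takeTtlSnapshot(Admin admin) throws Exception {
        // Assumption: the client supports extra snapshot properties; a "TTL" entry (seconds)
        // is what produces the "ttl=10" shown in the master's snapshot description above.
        admin.snapshot(new SnapshotDescription(
            "snapshot-testExportExpiredSnapshot",
            TableName.valueOf("testExportExpiredSnapshot"),
            SnapshotType.FLUSH,
            Collections.<String, Object>singletonMap("TTL", 10L)));
      }
    }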
2024-12-16T19:47:07,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-16T19:47:07,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152 2024-12-16T19:47:07,800 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:47:07,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-16T19:47:07,801 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:47:07,803 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:47:07,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742213_1389 (size=152) 2024-12-16T19:47:07,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742213_1389 (size=152) 2024-12-16T19:47:07,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742213_1389 (size=152) 2024-12-16T19:47:07,811 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:47:07,811 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 9607b295cff8a6b815204957546cba58}, {pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 34c062451cb346b64f29e9d2d97ae5fd}] 2024-12-16T19:47:07,812 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 34c062451cb346b64f29e9d2d97ae5fd 2024-12-16T19:47:07,812 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 9607b295cff8a6b815204957546cba58 2024-12-16T19:47:07,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=152 2024-12-16T19:47:07,962 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:07,962 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:47:07,963 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45217 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=153 2024-12-16T19:47:07,963 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=154 2024-12-16T19:47:07,963 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. 2024-12-16T19:47:07,963 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing 9607b295cff8a6b815204957546cba58 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-16T19:47:07,963 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. 2024-12-16T19:47:07,964 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing 34c062451cb346b64f29e9d2d97ae5fd 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-16T19:47:07,997 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412167ec97751f8ba4e789f76f9892a4301d0_34c062451cb346b64f29e9d2d97ae5fd is 71, key is 1a4b3b3a9fa0f2d8dad411719fcf233a/cf:q/1734378427752/Put/seqid=0 2024-12-16T19:47:08,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216c188aebdd3b74006b6a38fee9fe5c9c0_9607b295cff8a6b815204957546cba58 is 71, key is 042125cf27ce5116ebbe832f2c131233/cf:q/1734378427751/Put/seqid=0 2024-12-16T19:47:08,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742215_1391 (size=5241) 2024-12-16T19:47:08,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742215_1391 (size=5241) 2024-12-16T19:47:08,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742215_1391 (size=5241) 2024-12-16T19:47:08,019 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-16T19:47:08,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742214_1390 (size=8032) 2024-12-16T19:47:08,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742214_1390 (size=8032) 2024-12-16T19:47:08,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742214_1390 (size=8032) 2024-12-16T19:47:08,025 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216c188aebdd3b74006b6a38fee9fe5c9c0_9607b295cff8a6b815204957546cba58 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241216c188aebdd3b74006b6a38fee9fe5c9c0_9607b295cff8a6b815204957546cba58 2024-12-16T19:47:08,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:08,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/9607b295cff8a6b815204957546cba58/.tmp/cf/bc04a51e1512447c98b4f65e774099dc, store: [table=testExportExpiredSnapshot family=cf region=9607b295cff8a6b815204957546cba58] 2024-12-16T19:47:08,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/9607b295cff8a6b815204957546cba58/.tmp/cf/bc04a51e1512447c98b4f65e774099dc is 202, key is 09fe3479e9c3cbd466e50e1b1b4dac468/cf:q/1734378427751/Put/seqid=0 2024-12-16T19:47:08,037 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202412167ec97751f8ba4e789f76f9892a4301d0_34c062451cb346b64f29e9d2d97ae5fd to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b202412167ec97751f8ba4e789f76f9892a4301d0_34c062451cb346b64f29e9d2d97ae5fd 2024-12-16T19:47:08,038 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/34c062451cb346b64f29e9d2d97ae5fd/.tmp/cf/23e0a09cc9664ec088431cd5a0237de1, store: [table=testExportExpiredSnapshot family=cf region=34c062451cb346b64f29e9d2d97ae5fd] 
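Because the column family was created with MOB_THRESHOLD => '0', every value written by the test ("writing data to region ... with WAL disabled" above) is rerouted to the MOB path at flush time: the flusher first writes a MOB hfile under mobdir/ and then a small store file of reference cells for the region, which is what the rename and commit entries around here show. The client side is just ordinary Puts; a hedged sketch with an illustrative row, qualifier, and value:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    class MobPutSketch {
      static void writeOneCell(Connection conn) throws IOException {
        try (Table table = conn.getTable(TableName.valueOf("testExportExpiredSnapshot"))) {
          // An ordinary Put; with a MOB threshold of 0 the value is moved to a MOB file
          // under mobdir/ at flush time, leaving only a reference cell in the cf store file.
          table.put(new Put(Bytes.toBytes("row1"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value")));
        }
      }
    }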
2024-12-16T19:47:08,039 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/34c062451cb346b64f29e9d2d97ae5fd/.tmp/cf/23e0a09cc9664ec088431cd5a0237de1 is 202, key is 11af4531ae0752ac13eef045dc06ed8eb/cf:q/1734378427752/Put/seqid=0 2024-12-16T19:47:08,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742216_1392 (size=6284) 2024-12-16T19:47:08,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742216_1392 (size=6284) 2024-12-16T19:47:08,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742216_1392 (size=6284) 2024-12-16T19:47:08,055 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/9607b295cff8a6b815204957546cba58/.tmp/cf/bc04a51e1512447c98b4f65e774099dc 2024-12-16T19:47:08,062 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/9607b295cff8a6b815204957546cba58/.tmp/cf/bc04a51e1512447c98b4f65e774099dc as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/9607b295cff8a6b815204957546cba58/cf/bc04a51e1512447c98b4f65e774099dc 2024-12-16T19:47:08,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742217_1393 (size=14267) 2024-12-16T19:47:08,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742217_1393 (size=14267) 2024-12-16T19:47:08,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742217_1393 (size=14267) 2024-12-16T19:47:08,068 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/34c062451cb346b64f29e9d2d97ae5fd/.tmp/cf/23e0a09cc9664ec088431cd5a0237de1 2024-12-16T19:47:08,072 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/9607b295cff8a6b815204957546cba58/cf/bc04a51e1512447c98b4f65e774099dc, entries=5, sequenceid=5, filesize=6.1 K 2024-12-16T19:47:08,077 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/34c062451cb346b64f29e9d2d97ae5fd/.tmp/cf/23e0a09cc9664ec088431cd5a0237de1 as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/34c062451cb346b64f29e9d2d97ae5fd/cf/23e0a09cc9664ec088431cd5a0237de1 2024-12-16T19:47:08,078 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 9607b295cff8a6b815204957546cba58 in 115ms, sequenceid=5, compaction requested=false 2024-12-16T19:47:08,078 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-16T19:47:08,078 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for 9607b295cff8a6b815204957546cba58: 2024-12-16T19:47:08,078 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. for snapshot-testExportExpiredSnapshot completed. 2024-12-16T19:47:08,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-16T19:47:08,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:47:08,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/9607b295cff8a6b815204957546cba58/cf/bc04a51e1512447c98b4f65e774099dc] hfiles 2024-12-16T19:47:08,079 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/9607b295cff8a6b815204957546cba58/cf/bc04a51e1512447c98b4f65e774099dc for snapshot=snapshot-testExportExpiredSnapshot 2024-12-16T19:47:08,083 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/34c062451cb346b64f29e9d2d97ae5fd/cf/23e0a09cc9664ec088431cd5a0237de1, entries=45, sequenceid=5, filesize=13.9 K 2024-12-16T19:47:08,086 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 34c062451cb346b64f29e9d2d97ae5fd in 123ms, sequenceid=5, compaction requested=false 2024-12-16T19:47:08,086 
DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for 34c062451cb346b64f29e9d2d97ae5fd: 2024-12-16T19:47:08,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. for snapshot-testExportExpiredSnapshot completed. 2024-12-16T19:47:08,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-16T19:47:08,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:47:08,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/34c062451cb346b64f29e9d2d97ae5fd/cf/23e0a09cc9664ec088431cd5a0237de1] hfiles 2024-12-16T19:47:08,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/34c062451cb346b64f29e9d2d97ae5fd/cf/23e0a09cc9664ec088431cd5a0237de1 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-16T19:47:08,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742218_1394 (size=103) 2024-12-16T19:47:08,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742218_1394 (size=103) 2024-12-16T19:47:08,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-16T19:47:08,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742218_1394 (size=103) 2024-12-16T19:47:08,104 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. 
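The SnapshotManifest lines above show what a FLUSH snapshot actually writes: per-region region-info plus references to the existing hfiles, not copies of the data. Everything lands under .hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot and, as the later SNAPSHOT_COMPLETE_SNAPSHOT step shows, is simply moved into .hbase-snapshot/snapshot-testExportExpiredSnapshot. A quick way to confirm that a completed snapshot holds only small manifest files is to walk its directory with the standard Hadoop FileSystem API, as in the sketch below; the namenode address and root directory are taken from this log, and everything else is an assumption about your local setup.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

// Minimal sketch: list what a completed snapshot directory contains. It should be only
// small manifest/metadata files; the hfiles it references stay under data/ and mobdir/.
public class ListSnapshotContents {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:38709"); // namenode used by this test run
    Path snapshotDir = new Path("/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c"
        + "/.hbase-snapshot/snapshot-testExportExpiredSnapshot");
    try (FileSystem fs = FileSystem.get(conf)) {
      RemoteIterator<LocatedFileStatus> files = fs.listFiles(snapshotDir, true);
      while (files.hasNext()) {
        LocatedFileStatus f = files.next();
        System.out.println(f.getLen() + "\t" + f.getPath());
      }
    }
  }
}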
2024-12-16T19:47:08,104 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153 2024-12-16T19:47:08,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=153 2024-12-16T19:47:08,105 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 9607b295cff8a6b815204957546cba58 2024-12-16T19:47:08,105 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 9607b295cff8a6b815204957546cba58 2024-12-16T19:47:08,112 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; SnapshotRegionProcedure 9607b295cff8a6b815204957546cba58 in 299 msec 2024-12-16T19:47:08,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742219_1395 (size=103) 2024-12-16T19:47:08,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742219_1395 (size=103) 2024-12-16T19:47:08,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742219_1395 (size=103) 2024-12-16T19:47:08,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. 2024-12-16T19:47:08,126 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-12-16T19:47:08,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-12-16T19:47:08,127 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 34c062451cb346b64f29e9d2d97ae5fd 2024-12-16T19:47:08,127 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure 34c062451cb346b64f29e9d2d97ae5fd 2024-12-16T19:47:08,138 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=152 2024-12-16T19:47:08,138 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:47:08,138 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=152, state=SUCCESS; SnapshotRegionProcedure 34c062451cb346b64f29e9d2d97ae5fd in 318 msec 2024-12-16T19:47:08,140 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot 
type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:47:08,143 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-16T19:47:08,143 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:47:08,143 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:08,146 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b202412167ec97751f8ba4e789f76f9892a4301d0_34c062451cb346b64f29e9d2d97ae5fd, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241216c188aebdd3b74006b6a38fee9fe5c9c0_9607b295cff8a6b815204957546cba58] hfiles 2024-12-16T19:47:08,146 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b202412167ec97751f8ba4e789f76f9892a4301d0_34c062451cb346b64f29e9d2d97ae5fd 2024-12-16T19:47:08,146 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241216c188aebdd3b74006b6a38fee9fe5c9c0_9607b295cff8a6b815204957546cba58 2024-12-16T19:47:08,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742220_1396 (size=287) 2024-12-16T19:47:08,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742220_1396 (size=287) 2024-12-16T19:47:08,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742220_1396 (size=287) 2024-12-16T19:47:08,168 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:47:08,168 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-16T19:47:08,169 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-16T19:47:08,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742221_1397 (size=935) 2024-12-16T19:47:08,179 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742221_1397 (size=935) 2024-12-16T19:47:08,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742221_1397 (size=935) 2024-12-16T19:47:08,183 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:47:08,188 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:47:08,188 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-16T19:47:08,190 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:47:08,190 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152 2024-12-16T19:47:08,191 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 391 msec 2024-12-16T19:47:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-16T19:47:08,404 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot, procId: 152 completed 2024-12-16T19:47:08,855 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0006_000001 (auth:SIMPLE) from 127.0.0.1:38646 2024-12-16T19:47:08,875 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_1/usercache/jenkins/appcache/application_1734378249643_0006/container_1734378249643_0006_01_000001/launch_container.sh] 2024-12-16T19:47:08,876 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_1/usercache/jenkins/appcache/application_1734378249643_0006/container_1734378249643_0006_01_000001/container_tokens] 2024-12-16T19:47:08,876 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_1/usercache/jenkins/appcache/application_1734378249643_0006/container_1734378249643_0006_01_000001/sysfs] 2024-12-16T19:47:09,760 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T19:47:16,039 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-16T19:47:16,039 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-16T19:47:18,412 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378438412 2024-12-16T19:47:18,412 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:38709, tgtDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378438412, rawTgtDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378438412, srcFsUri=hdfs://localhost:38709, srcDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:47:18,465 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:38709, inputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:47:18,465 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378438412, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378438412/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-16T19:47:18,469 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T19:47:18,471 ERROR [Time-limited test {}] util.AbstractHBaseTool(153): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:948) ~[classes/:?] 
    at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1093) ~[classes/:?]
    at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[hbase-common-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?]
    at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:315) ~[test-classes/:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
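This failure is the expected outcome of the test: the snapshot was taken with ttl=10 (seconds) and its procedure finished at 19:47:08,191, but the export is only attempted at 19:47:18,412, so ExportSnapshot's "Verify the source snapshot's expiration status" step rejects it with SnapshotTTLExpiredException. The sketch below only illustrates that arithmetic (a snapshot with a positive TTL is expired once creation time plus TTL lies in the past); it is not the actual verifySnapshot implementation, and the "TTL <= 0 means never expires" convention is an assumption here.

import java.util.concurrent.TimeUnit;

// Hedged sketch of the TTL-expiry arithmetic implied by the log above.
public final class SnapshotTtlSketch {

  static boolean isExpired(long creationTimeMs, long ttlSeconds, long nowMs) {
    return ttlSeconds > 0 && creationTimeMs + TimeUnit.SECONDS.toMillis(ttlSeconds) < nowMs;
  }

  public static void main(String[] args) {
    long created = 1734378428191L;        // 19:47:08,191: snapshot procedure pid=152 finished
    long ttlSeconds = 10L;                // ttl=10 from the snapshot description in the log
    long exportAttempt = 1734378438412L;  // 19:47:18,412: ExportSnapshot verification runs
    System.out.println("expired at export time? "
        + isExpired(created, ttlSeconds, exportAttempt)); // prints true
  }
}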
2024-12-16T19:47:18,473 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportExpiredSnapshot 2024-12-16T19:47:18,473 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-12-16T19:47:18,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-16T19:47:18,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-16T19:47:18,477 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378438477"}]},"ts":"1734378438477"} 2024-12-16T19:47:18,479 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-16T19:47:18,489 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-16T19:47:18,491 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-16T19:47:18,495 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=cec837acf781e0117df35d27ea71b4bf, UNASSIGN}, {pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c8fa4a83c7a3032f869b5bd5a3077741, UNASSIGN}] 2024-12-16T19:47:18,496 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c8fa4a83c7a3032f869b5bd5a3077741, UNASSIGN 2024-12-16T19:47:18,496 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=cec837acf781e0117df35d27ea71b4bf, UNASSIGN 2024-12-16T19:47:18,505 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=cec837acf781e0117df35d27ea71b4bf, regionState=CLOSING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:47:18,505 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=c8fa4a83c7a3032f869b5bd5a3077741, regionState=CLOSING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:18,507 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:47:18,507 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=157, state=RUNNABLE; CloseRegionProcedure cec837acf781e0117df35d27ea71b4bf, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:47:18,508 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: 
false 2024-12-16T19:47:18,508 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=158, state=RUNNABLE; CloseRegionProcedure c8fa4a83c7a3032f869b5bd5a3077741, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:47:18,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-16T19:47:18,659 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:47:18,660 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(124): Close cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:18,660 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:47:18,660 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1681): Closing cec837acf781e0117df35d27ea71b4bf, disabling compactions & flushes 2024-12-16T19:47:18,660 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. 2024-12-16T19:47:18,660 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. 2024-12-16T19:47:18,660 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. after waiting 0 ms 2024-12-16T19:47:18,660 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. 2024-12-16T19:47:18,661 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:18,662 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(124): Close c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:18,662 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:47:18,662 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1681): Closing c8fa4a83c7a3032f869b5bd5a3077741, disabling compactions & flushes 2024-12-16T19:47:18,662 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. 2024-12-16T19:47:18,662 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. 
2024-12-16T19:47:18,662 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. after waiting 0 ms 2024-12-16T19:47:18,662 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. 2024-12-16T19:47:18,719 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:47:18,720 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:47:18,720 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf. 2024-12-16T19:47:18,720 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1635): Region close journal for cec837acf781e0117df35d27ea71b4bf: 2024-12-16T19:47:18,723 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(170): Closed cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:18,723 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=cec837acf781e0117df35d27ea71b4bf, regionState=CLOSED 2024-12-16T19:47:18,732 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=157 2024-12-16T19:47:18,733 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=157, state=SUCCESS; CloseRegionProcedure cec837acf781e0117df35d27ea71b4bf, server=7a6e4bf70377,42741,1734378236851 in 222 msec 2024-12-16T19:47:18,735 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=cec837acf781e0117df35d27ea71b4bf, UNASSIGN in 238 msec 2024-12-16T19:47:18,736 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:47:18,737 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:47:18,737 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741. 
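The disable that started at 19:47:18,473 and everything that follows through 19:47:19,502 is the test's teardown: testtb-testExportExpiredSnapshot is disabled and deleted (its store files and MOB files are moved into archive/), and the three snapshots left over from the test are removed. In client terms that teardown corresponds roughly to the Admin calls below; this is a minimal sketch assuming the standard HBase 2.x Admin API, with table and snapshot names taken from the log and the connection setup left generic.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of the client-side teardown mirrored by the procedures below (pids 155-161).
public class TeardownSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
      admin.disableTable(table);  // DisableTableProcedure (pid=155)
      admin.deleteTable(table);   // DeleteTableProcedure (pid=161), archives hfiles and MOB files
      // Snapshot cleanup, matching the three delete requests at 19:47:19.
      admin.deleteSnapshot("emptySnaptb0-testExportExpiredSnapshot");
      admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
      admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");
    }
  }
}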
2024-12-16T19:47:18,737 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1635): Region close journal for c8fa4a83c7a3032f869b5bd5a3077741: 2024-12-16T19:47:18,741 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=c8fa4a83c7a3032f869b5bd5a3077741, regionState=CLOSED 2024-12-16T19:47:18,742 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(170): Closed c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:18,756 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=158 2024-12-16T19:47:18,756 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=158, state=SUCCESS; CloseRegionProcedure c8fa4a83c7a3032f869b5bd5a3077741, server=7a6e4bf70377,42861,1734378237024 in 238 msec 2024-12-16T19:47:18,766 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=156 2024-12-16T19:47:18,766 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c8fa4a83c7a3032f869b5bd5a3077741, UNASSIGN in 261 msec 2024-12-16T19:47:18,776 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-16T19:47:18,776 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 280 msec 2024-12-16T19:47:18,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-16T19:47:18,780 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378438780"}]},"ts":"1734378438780"} 2024-12-16T19:47:18,782 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-16T19:47:18,789 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-16T19:47:18,806 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 321 msec 2024-12-16T19:47:19,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-16T19:47:19,086 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 155 completed 2024-12-16T19:47:19,087 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-16T19:47:19,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-16T19:47:19,094 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure 
table=testtb-testExportExpiredSnapshot 2024-12-16T19:47:19,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-16T19:47:19,098 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-16T19:47:19,098 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-16T19:47:19,102 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:19,102 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:19,107 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741/recovered.edits] 2024-12-16T19:47:19,114 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf/recovered.edits] 2024-12-16T19:47:19,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-16T19:47:19,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-16T19:47:19,118 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-16T19:47:19,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-16T19:47:19,120 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741/cf/d4c6b384956a45a9ab1d1bc913bca2c4 to 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741/cf/d4c6b384956a45a9ab1d1bc913bca2c4 2024-12-16T19:47:19,122 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-16T19:47:19,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-16T19:47:19,126 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf/cf/a723c1fdb0754e92960c1d7a957b85c5 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf/cf/a723c1fdb0754e92960c1d7a957b85c5 2024-12-16T19:47:19,132 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741/recovered.edits/9.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741/recovered.edits/9.seqid 2024-12-16T19:47:19,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:19,134 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:19,134 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data null 2024-12-16T19:47:19,134 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-16T19:47:19,134 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data null 2024-12-16T19:47:19,134 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-16T19:47:19,137 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:19,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-16T19:47:19,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:19,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-16T19:47:19,142 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:19,146 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:19,146 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:19,147 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:19,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-16T19:47:19,150 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf/recovered.edits/9.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf/recovered.edits/9.seqid 2024-12-16T19:47:19,153 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:19,155 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportExpiredSnapshot/cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:19,155 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-16T19:47:19,155 DEBUG [PEWorker-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-12-16T19:47:19,157 DEBUG [PEWorker-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf] 2024-12-16T19:47:19,194 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241216eb818f5023e94d58afd4161810e95bf3_cec837acf781e0117df35d27ea71b4bf to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241216eb818f5023e94d58afd4161810e95bf3_cec837acf781e0117df35d27ea71b4bf 2024-12-16T19:47:19,194 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241216219b062e7c5647e799a74f93184269cd_c8fa4a83c7a3032f869b5bd5a3077741 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b20241216219b062e7c5647e799a74f93184269cd_c8fa4a83c7a3032f869b5bd5a3077741 2024-12-16T19:47:19,196 DEBUG [PEWorker-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-12-16T19:47:19,218 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-16T19:47:19,232 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-16T19:47:19,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-16T19:47:19,261 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-16T19:47:19,265 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-16T19:47:19,265 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportExpiredSnapshot' from region states. 
2024-12-16T19:47:19,266 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378439265"}]},"ts":"9223372036854775807"} 2024-12-16T19:47:19,266 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378439265"}]},"ts":"9223372036854775807"} 2024-12-16T19:47:19,275 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-16T19:47:19,275 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => cec837acf781e0117df35d27ea71b4bf, NAME => 'testtb-testExportExpiredSnapshot,,1734378424972.cec837acf781e0117df35d27ea71b4bf.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => c8fa4a83c7a3032f869b5bd5a3077741, NAME => 'testtb-testExportExpiredSnapshot,1,1734378424972.c8fa4a83c7a3032f869b5bd5a3077741.', STARTKEY => '1', ENDKEY => ''}] 2024-12-16T19:47:19,275 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportExpiredSnapshot' as deleted. 2024-12-16T19:47:19,275 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734378439275"}]},"ts":"9223372036854775807"} 2024-12-16T19:47:19,278 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-16T19:47:19,288 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-16T19:47:19,291 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 201 msec 2024-12-16T19:47:19,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-16T19:47:19,449 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 161 completed 2024-12-16T19:47:19,475 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" 2024-12-16T19:47:19,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-16T19:47:19,489 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" 2024-12-16T19:47:19,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-16T19:47:19,494 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" 2024-12-16T19:47:19,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: 
snaptb0-testExportExpiredSnapshot 2024-12-16T19:47:19,574 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=798 (was 800), OpenFileDescriptor=797 (was 820), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=875 (was 838) - SystemLoadAverage LEAK? -, ProcessCount=14 (was 14), AvailableMemoryMB=2329 (was 1607) - AvailableMemoryMB LEAK? - 2024-12-16T19:47:19,574 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-16T19:47:19,628 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=798, OpenFileDescriptor=797, MaxFileDescriptor=1048576, SystemLoadAverage=875, ProcessCount=14, AvailableMemoryMB=2326 2024-12-16T19:47:19,628 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=798 is superior to 500 2024-12-16T19:47:19,635 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T19:47:19,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-16T19:47:19,654 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T19:47:19,654 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 162 2024-12-16T19:47:19,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-16T19:47:19,662 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T19:47:19,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742222_1398 (size=448) 2024-12-16T19:47:19,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742222_1398 (size=448) 2024-12-16T19:47:19,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742222_1398 (size=448) 2024-12-16T19:47:19,727 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4f9df011687dc0e0feacf03cbc677973, NAME => 
'testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:47:19,742 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => b86b582edae56e07d8340b1ef1f3a708, NAME => 'testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:47:19,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-16T19:47:19,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742224_1400 (size=73) 2024-12-16T19:47:19,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742224_1400 (size=73) 2024-12-16T19:47:19,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742224_1400 (size=73) 2024-12-16T19:47:19,873 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:47:19,873 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing b86b582edae56e07d8340b1ef1f3a708, disabling compactions & flushes 2024-12-16T19:47:19,873 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. 2024-12-16T19:47:19,873 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. 
2024-12-16T19:47:19,873 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. after waiting 0 ms 2024-12-16T19:47:19,874 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. 2024-12-16T19:47:19,874 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. 2024-12-16T19:47:19,874 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for b86b582edae56e07d8340b1ef1f3a708: 2024-12-16T19:47:19,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742223_1399 (size=73) 2024-12-16T19:47:19,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742223_1399 (size=73) 2024-12-16T19:47:19,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742223_1399 (size=73) 2024-12-16T19:47:19,886 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:47:19,886 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing 4f9df011687dc0e0feacf03cbc677973, disabling compactions & flushes 2024-12-16T19:47:19,886 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. 2024-12-16T19:47:19,886 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. 2024-12-16T19:47:19,886 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. after waiting 0 ms 2024-12-16T19:47:19,886 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. 2024-12-16T19:47:19,886 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. 
2024-12-16T19:47:19,886 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4f9df011687dc0e0feacf03cbc677973: 2024-12-16T19:47:19,890 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T19:47:19,890 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1734378439890"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378439890"}]},"ts":"1734378439890"} 2024-12-16T19:47:19,890 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1734378439890"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378439890"}]},"ts":"1734378439890"} 2024-12-16T19:47:19,906 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T19:47:19,908 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T19:47:19,908 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378439908"}]},"ts":"1734378439908"} 2024-12-16T19:47:19,913 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-16T19:47:19,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-16T19:47:20,049 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {7a6e4bf70377=0} racks are {/default-rack=0} 2024-12-16T19:47:20,051 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T19:47:20,051 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T19:47:20,051 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T19:47:20,051 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T19:47:20,051 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T19:47:20,051 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T19:47:20,051 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T19:47:20,051 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4f9df011687dc0e0feacf03cbc677973, ASSIGN}, {pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b86b582edae56e07d8340b1ef1f3a708, ASSIGN}] 
2024-12-16T19:47:20,054 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4f9df011687dc0e0feacf03cbc677973, ASSIGN 2024-12-16T19:47:20,054 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b86b582edae56e07d8340b1ef1f3a708, ASSIGN 2024-12-16T19:47:20,055 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4f9df011687dc0e0feacf03cbc677973, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42741,1734378236851; forceNewPlan=false, retain=false 2024-12-16T19:47:20,055 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b86b582edae56e07d8340b1ef1f3a708, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42861,1734378237024; forceNewPlan=false, retain=false 2024-12-16T19:47:20,205 INFO [7a6e4bf70377:45883 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T19:47:20,206 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=b86b582edae56e07d8340b1ef1f3a708, regionState=OPENING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:20,206 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=4f9df011687dc0e0feacf03cbc677973, regionState=OPENING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:47:20,207 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=165, ppid=164, state=RUNNABLE; OpenRegionProcedure b86b582edae56e07d8340b1ef1f3a708, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:47:20,209 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=163, state=RUNNABLE; OpenRegionProcedure 4f9df011687dc0e0feacf03cbc677973, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:47:20,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-16T19:47:20,360 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:20,362 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:47:20,368 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. 
2024-12-16T19:47:20,368 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7285): Opening region: {ENCODED => 4f9df011687dc0e0feacf03cbc677973, NAME => 'testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T19:47:20,368 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. service=AccessControlService 2024-12-16T19:47:20,368 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:47:20,368 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:20,369 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:47:20,369 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7327): checking encryption for 4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:20,369 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7330): checking classloading for 4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:20,369 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. 2024-12-16T19:47:20,369 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7285): Opening region: {ENCODED => b86b582edae56e07d8340b1ef1f3a708, NAME => 'testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T19:47:20,370 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. service=AccessControlService 2024-12-16T19:47:20,370 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-16T19:47:20,370 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:20,370 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:47:20,370 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7327): checking encryption for b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:20,370 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7330): checking classloading for b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:20,376 INFO [StoreOpener-4f9df011687dc0e0feacf03cbc677973-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:20,378 INFO [StoreOpener-4f9df011687dc0e0feacf03cbc677973-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4f9df011687dc0e0feacf03cbc677973 columnFamilyName cf 2024-12-16T19:47:20,386 INFO [StoreOpener-b86b582edae56e07d8340b1ef1f3a708-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:20,398 DEBUG [StoreOpener-4f9df011687dc0e0feacf03cbc677973-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:20,402 INFO [StoreOpener-4f9df011687dc0e0feacf03cbc677973-1 {}] regionserver.HStore(327): Store=4f9df011687dc0e0feacf03cbc677973/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:47:20,404 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:20,406 INFO [StoreOpener-b86b582edae56e07d8340b1ef1f3a708-1 {}] 
compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b86b582edae56e07d8340b1ef1f3a708 columnFamilyName cf 2024-12-16T19:47:20,408 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:20,410 DEBUG [StoreOpener-b86b582edae56e07d8340b1ef1f3a708-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:20,418 INFO [StoreOpener-b86b582edae56e07d8340b1ef1f3a708-1 {}] regionserver.HStore(327): Store=b86b582edae56e07d8340b1ef1f3a708/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:47:20,422 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:20,426 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:20,433 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1085): writing seq id for b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:20,433 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1085): writing seq id for 4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:20,446 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:47:20,450 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1102): Opened 4f9df011687dc0e0feacf03cbc677973; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60458337, jitterRate=-0.09910057485103607}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:47:20,451 DEBUG 
[RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1001): Region open journal for 4f9df011687dc0e0feacf03cbc677973: 2024-12-16T19:47:20,451 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:47:20,452 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1102): Opened b86b582edae56e07d8340b1ef1f3a708; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71408735, jitterRate=0.06407307088375092}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:47:20,452 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1001): Region open journal for b86b582edae56e07d8340b1ef1f3a708: 2024-12-16T19:47:20,458 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973., pid=166, masterSystemTime=1734378440362 2024-12-16T19:47:20,458 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708., pid=165, masterSystemTime=1734378440360 2024-12-16T19:47:20,466 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. 2024-12-16T19:47:20,466 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. 2024-12-16T19:47:20,474 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=4f9df011687dc0e0feacf03cbc677973, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:47:20,476 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. 2024-12-16T19:47:20,476 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. 
2024-12-16T19:47:20,479 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=b86b582edae56e07d8340b1ef1f3a708, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:20,481 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=163 2024-12-16T19:47:20,481 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=163, state=SUCCESS; OpenRegionProcedure 4f9df011687dc0e0feacf03cbc677973, server=7a6e4bf70377,42741,1734378236851 in 268 msec 2024-12-16T19:47:20,481 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45883 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=7a6e4bf70377,42861,1734378237024, table=testtb-testEmptyExportFileSystemState, region=b86b582edae56e07d8340b1ef1f3a708. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-16T19:47:20,483 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4f9df011687dc0e0feacf03cbc677973, ASSIGN in 430 msec 2024-12-16T19:47:20,484 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=165, resume processing ppid=164 2024-12-16T19:47:20,484 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, ppid=164, state=SUCCESS; OpenRegionProcedure b86b582edae56e07d8340b1ef1f3a708, server=7a6e4bf70377,42861,1734378237024 in 275 msec 2024-12-16T19:47:20,486 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=162 2024-12-16T19:47:20,486 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b86b582edae56e07d8340b1ef1f3a708, ASSIGN in 433 msec 2024-12-16T19:47:20,487 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T19:47:20,487 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378440487"}]},"ts":"1734378440487"} 2024-12-16T19:47:20,489 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-16T19:47:20,502 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T19:47:20,502 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-16T19:47:20,507 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-16T19:47:20,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:20,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:20,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:20,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:20,571 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:20,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-16T19:47:20,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-16T19:47:20,589 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:20,590 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:20,590 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:20,590 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:20,590 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:20,591 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:20,591 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:20,592 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, state=SUCCESS; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 955 msec 2024-12-16T19:47:20,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-16T19:47:20,764 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 162 completed 2024-12-16T19:47:20,767 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-16T19:47:20,767 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. 2024-12-16T19:47:20,767 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:47:20,787 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-16T19:47:20,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378440787 (current time:1734378440787). 2024-12-16T19:47:20,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:47:20,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-16T19:47:20,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:47:20,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x276181b0 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a8178d5 2024-12-16T19:47:20,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a2017bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:47:20,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:20,806 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54128, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:20,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x276181b0 to 
127.0.0.1:62515 2024-12-16T19:47:20,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:47:20,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x65d8267e to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@54aeadc0 2024-12-16T19:47:20,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e89e521, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:47:20,834 DEBUG [hconnection-0x5ab2555e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:20,835 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54140, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:20,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:20,839 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42786, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:20,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x65d8267e to 127.0.0.1:62515 2024-12-16T19:47:20,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:47:20,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-16T19:47:20,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-16T19:47:20,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-16T19:47:20,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167 2024-12-16T19:47:20,844 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:47:20,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-16T19:47:20,845 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:47:20,847 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:47:20,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742225_1401 (size=185) 2024-12-16T19:47:20,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742225_1401 (size=185) 2024-12-16T19:47:20,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742225_1401 (size=185) 2024-12-16T19:47:20,863 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:47:20,863 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 4f9df011687dc0e0feacf03cbc677973}, {pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure b86b582edae56e07d8340b1ef1f3a708}] 2024-12-16T19:47:20,864 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:20,864 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:20,945 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-16T19:47:21,015 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:47:21,015 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:21,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=169 2024-12-16T19:47:21,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42741 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=168 2024-12-16T19:47:21,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. 2024-12-16T19:47:21,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. 2024-12-16T19:47:21,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.HRegion(2538): Flush status journal for b86b582edae56e07d8340b1ef1f3a708: 2024-12-16T19:47:21,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 4f9df011687dc0e0feacf03cbc677973: 2024-12-16T19:47:21,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-16T19:47:21,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-16T19:47:21,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:21,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:21,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:47:21,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:47:21,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T19:47:21,016 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T19:47:21,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742226_1402 (size=76) 2024-12-16T19:47:21,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742226_1402 (size=76) 2024-12-16T19:47:21,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742226_1402 (size=76) 2024-12-16T19:47:21,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742227_1403 (size=76) 2024-12-16T19:47:21,024 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. 
2024-12-16T19:47:21,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742227_1403 (size=76) 2024-12-16T19:47:21,024 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=169 2024-12-16T19:47:21,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742227_1403 (size=76) 2024-12-16T19:47:21,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=169 2024-12-16T19:47:21,024 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:21,024 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:21,026 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=167, state=SUCCESS; SnapshotRegionProcedure b86b582edae56e07d8340b1ef1f3a708 in 162 msec 2024-12-16T19:47:21,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-16T19:47:21,424 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. 
2024-12-16T19:47:21,425 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-12-16T19:47:21,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-12-16T19:47:21,425 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:21,425 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:21,438 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-16T19:47:21,438 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; SnapshotRegionProcedure 4f9df011687dc0e0feacf03cbc677973 in 566 msec 2024-12-16T19:47:21,438 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:47:21,439 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:47:21,440 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-16T19:47:21,440 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:47:21,440 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:21,441 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-16T19:47:21,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-16T19:47:21,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742228_1404 (size=68) 2024-12-16T19:47:21,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742228_1404 (size=68) 2024-12-16T19:47:21,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742228_1404 (size=68) 2024-12-16T19:47:21,466 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:47:21,466 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:21,467 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:21,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742229_1405 (size=673) 2024-12-16T19:47:21,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742229_1405 (size=673) 2024-12-16T19:47:21,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742229_1405 (size=673) 2024-12-16T19:47:21,516 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:47:21,529 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:47:21,534 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:21,536 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:47:21,536 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167 2024-12-16T19:47:21,543 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 694 msec 2024-12-16T19:47:21,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-16T19:47:21,951 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 167 completed 2024-12-16T19:47:21,955 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42861 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:47:21,958 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42741 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:47:21,971 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-16T19:47:21,971 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. 2024-12-16T19:47:21,971 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:47:22,012 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-16T19:47:22,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378442012 (current time:1734378442012). 
2024-12-16T19:47:22,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:47:22,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-16T19:47:22,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:47:22,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7e9d572d to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@364df505 2024-12-16T19:47:22,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23b8f00f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:47:22,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:22,026 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54144, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:22,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7e9d572d to 127.0.0.1:62515 2024-12-16T19:47:22,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:47:22,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08a3e9eb to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@511fc027 2024-12-16T19:47:22,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35436a83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:47:22,100 DEBUG [hconnection-0x36d85df9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:22,101 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54160, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:22,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:22,107 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42802, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:22,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x08a3e9eb to 127.0.0.1:62515 2024-12-16T19:47:22,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:47:22,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-16T19:47:22,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T19:47:22,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=170, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-16T19:47:22,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170 2024-12-16T19:47:22,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-16T19:47:22,111 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:47:22,113 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:47:22,115 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:47:22,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742230_1406 (size=180) 2024-12-16T19:47:22,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742230_1406 (size=180) 2024-12-16T19:47:22,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742230_1406 (size=180) 2024-12-16T19:47:22,132 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:47:22,132 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 4f9df011687dc0e0feacf03cbc677973}, 
{pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure b86b582edae56e07d8340b1ef1f3a708}] 2024-12-16T19:47:22,133 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:22,133 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:22,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-16T19:47:22,284 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:47:22,284 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:22,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=172 2024-12-16T19:47:22,285 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42741 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=171 2024-12-16T19:47:22,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. 2024-12-16T19:47:22,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. 
2024-12-16T19:47:22,285 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2837): Flushing 4f9df011687dc0e0feacf03cbc677973 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-16T19:47:22,285 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing b86b582edae56e07d8340b1ef1f3a708 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-16T19:47:22,309 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241216fdc4b002d3e640779d73ad23d473d0db_b86b582edae56e07d8340b1ef1f3a708 is 71, key is 160d1a4e717728601dc8e3ed3fe168b5/cf:q/1734378441955/Put/seqid=0 2024-12-16T19:47:22,311 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216d0d08b206578407e80f0fd4e03e5f49e_4f9df011687dc0e0feacf03cbc677973 is 71, key is 0031ffbe5064117bc059bf04ee1b27c1/cf:q/1734378441958/Put/seqid=0 2024-12-16T19:47:22,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742231_1407 (size=8102) 2024-12-16T19:47:22,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742231_1407 (size=8102) 2024-12-16T19:47:22,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742231_1407 (size=8102) 2024-12-16T19:47:22,326 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:22,342 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241216fdc4b002d3e640779d73ad23d473d0db_b86b582edae56e07d8340b1ef1f3a708 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241216fdc4b002d3e640779d73ad23d473d0db_b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:22,343 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708/.tmp/cf/3cc0d651249c4053963a04c629d1240d, store: [table=testtb-testEmptyExportFileSystemState family=cf region=b86b582edae56e07d8340b1ef1f3a708] 2024-12-16T19:47:22,344 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708/.tmp/cf/3cc0d651249c4053963a04c629d1240d is 214, key is 104c0cdaedfd3e5af7c35e078e4dae208/cf:q/1734378441955/Put/seqid=0 2024-12-16T19:47:22,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742232_1408 (size=5171) 2024-12-16T19:47:22,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742232_1408 (size=5171) 2024-12-16T19:47:22,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742232_1408 (size=5171) 2024-12-16T19:47:22,347 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:22,351 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216d0d08b206578407e80f0fd4e03e5f49e_4f9df011687dc0e0feacf03cbc677973 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241216d0d08b206578407e80f0fd4e03e5f49e_4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:22,352 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973/.tmp/cf/95049c464e7f45ef83446bfb17a3c1b2, store: [table=testtb-testEmptyExportFileSystemState family=cf region=4f9df011687dc0e0feacf03cbc677973] 2024-12-16T19:47:22,353 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973/.tmp/cf/95049c464e7f45ef83446bfb17a3c1b2 is 214, key is 0304d11c3a03236e96a433b3fb0c53b11/cf:q/1734378441958/Put/seqid=0 2024-12-16T19:47:22,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742233_1409 (size=15029) 2024-12-16T19:47:22,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742233_1409 (size=15029) 2024-12-16T19:47:22,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742233_1409 (size=15029) 2024-12-16T19:47:22,354 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708/.tmp/cf/3cc0d651249c4053963a04c629d1240d 2024-12-16T19:47:22,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742234_1410 (size=6146) 2024-12-16T19:47:22,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742234_1410 (size=6146) 2024-12-16T19:47:22,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742234_1410 (size=6146) 2024-12-16T19:47:22,358 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973/.tmp/cf/95049c464e7f45ef83446bfb17a3c1b2 2024-12-16T19:47:22,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708/.tmp/cf/3cc0d651249c4053963a04c629d1240d as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708/cf/3cc0d651249c4053963a04c629d1240d 2024-12-16T19:47:22,371 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973/.tmp/cf/95049c464e7f45ef83446bfb17a3c1b2 as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973/cf/95049c464e7f45ef83446bfb17a3c1b2 2024-12-16T19:47:22,371 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708/cf/3cc0d651249c4053963a04c629d1240d, entries=46, sequenceid=6, filesize=14.7 K 2024-12-16T19:47:22,372 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for b86b582edae56e07d8340b1ef1f3a708 in 87ms, sequenceid=6, compaction requested=false 2024-12-16T19:47:22,372 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-16T19:47:22,373 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] 
regionserver.HRegion(2538): Flush status journal for b86b582edae56e07d8340b1ef1f3a708: 2024-12-16T19:47:22,373 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-16T19:47:22,373 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:22,373 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:47:22,373 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708/cf/3cc0d651249c4053963a04c629d1240d] hfiles 2024-12-16T19:47:22,373 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708/cf/3cc0d651249c4053963a04c629d1240d for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:22,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742235_1411 (size=115) 2024-12-16T19:47:22,383 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973/cf/95049c464e7f45ef83446bfb17a3c1b2, entries=4, sequenceid=6, filesize=6.0 K 2024-12-16T19:47:22,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742235_1411 (size=115) 2024-12-16T19:47:22,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742235_1411 (size=115) 2024-12-16T19:47:22,384 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 4f9df011687dc0e0feacf03cbc677973 in 99ms, sequenceid=6, compaction requested=false 2024-12-16T19:47:22,384 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. 
2024-12-16T19:47:22,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2538): Flush status journal for 4f9df011687dc0e0feacf03cbc677973: 2024-12-16T19:47:22,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-12-16T19:47:22,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-16T19:47:22,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:22,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:47:22,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973/cf/95049c464e7f45ef83446bfb17a3c1b2] hfiles 2024-12-16T19:47:22,385 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973/cf/95049c464e7f45ef83446bfb17a3c1b2 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:22,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-12-16T19:47:22,385 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:22,385 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:22,387 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=170, state=SUCCESS; SnapshotRegionProcedure b86b582edae56e07d8340b1ef1f3a708 in 254 msec 2024-12-16T19:47:22,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742236_1412 (size=115) 2024-12-16T19:47:22,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742236_1412 (size=115) 2024-12-16T19:47:22,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742236_1412 (size=115) 2024-12-16T19:47:22,391 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] 
regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. 2024-12-16T19:47:22,391 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=171 2024-12-16T19:47:22,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=171 2024-12-16T19:47:22,391 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:22,391 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:22,393 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=171, resume processing ppid=170 2024-12-16T19:47:22,393 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:47:22,393 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; SnapshotRegionProcedure 4f9df011687dc0e0feacf03cbc677973 in 260 msec 2024-12-16T19:47:22,393 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:47:22,394 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-16T19:47:22,394 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:47:22,394 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:22,395 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241216fdc4b002d3e640779d73ad23d473d0db_b86b582edae56e07d8340b1ef1f3a708, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241216d0d08b206578407e80f0fd4e03e5f49e_4f9df011687dc0e0feacf03cbc677973] hfiles 2024-12-16T19:47:22,396 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241216fdc4b002d3e640779d73ad23d473d0db_b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:22,396 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241216d0d08b206578407e80f0fd4e03e5f49e_4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:22,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742237_1413 (size=299) 2024-12-16T19:47:22,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742237_1413 (size=299) 2024-12-16T19:47:22,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742237_1413 (size=299) 2024-12-16T19:47:22,404 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:47:22,405 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:22,406 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:22,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-16T19:47:22,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742238_1414 (size=983) 
2024-12-16T19:47:22,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742238_1414 (size=983) 2024-12-16T19:47:22,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742238_1414 (size=983) 2024-12-16T19:47:22,430 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:47:22,440 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:47:22,441 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:22,442 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:47:22,442 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170 2024-12-16T19:47:22,443 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 333 msec 2024-12-16T19:47:22,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-16T19:47:22,714 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 170 completed 2024-12-16T19:47:22,714 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378442714 2024-12-16T19:47:22,714 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:38709, tgtDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378442714, rawTgtDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378442714, srcFsUri=hdfs://localhost:38709, srcDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:47:22,758 DEBUG [Time-limited test {}] 
snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:38709, inputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:47:22,758 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378442714, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378442714/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:22,760 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T19:47:22,774 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378442714/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:22,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742239_1415 (size=185) 2024-12-16T19:47:22,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742239_1415 (size=185) 2024-12-16T19:47:22,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742239_1415 (size=185) 2024-12-16T19:47:22,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742240_1416 (size=673) 2024-12-16T19:47:22,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742240_1416 (size=673) 2024-12-16T19:47:22,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742240_1416 (size=673) 2024-12-16T19:47:22,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:22,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:22,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:22,828 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:23,784 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-1796321746505939640.jar 2024-12-16T19:47:23,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:23,785 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:23,843 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-1343382017429980211.jar 2024-12-16T19:47:23,844 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:23,844 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:23,844 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:23,844 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:23,844 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:23,844 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:23,845 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T19:47:23,845 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T19:47:23,845 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T19:47:23,845 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T19:47:23,845 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-16T19:47:23,845 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T19:47:23,846 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T19:47:23,846 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T19:47:23,846 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T19:47:23,846 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T19:47:23,846 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T19:47:23,846 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T19:47:23,847 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:47:23,847 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:47:23,847 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:47:23,847 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:47:23,847 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:47:23,847 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:47:23,847 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:47:23,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742241_1417 (size=127628) 2024-12-16T19:47:23,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742241_1417 (size=127628) 2024-12-16T19:47:23,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742241_1417 (size=127628) 2024-12-16T19:47:23,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742242_1418 (size=2172137) 2024-12-16T19:47:23,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742242_1418 (size=2172137) 2024-12-16T19:47:23,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742242_1418 (size=2172137) 2024-12-16T19:47:23,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742243_1419 (size=213228) 2024-12-16T19:47:23,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742243_1419 (size=213228) 2024-12-16T19:47:23,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742243_1419 (size=213228) 2024-12-16T19:47:23,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742244_1420 (size=1877034) 2024-12-16T19:47:23,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742244_1420 (size=1877034) 2024-12-16T19:47:23,923 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742244_1420 (size=1877034) 2024-12-16T19:47:23,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742245_1421 (size=533455) 2024-12-16T19:47:23,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742245_1421 (size=533455) 2024-12-16T19:47:23,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742245_1421 (size=533455) 2024-12-16T19:47:23,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742246_1422 (size=7280644) 2024-12-16T19:47:23,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742246_1422 (size=7280644) 2024-12-16T19:47:23,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742246_1422 (size=7280644) 2024-12-16T19:47:24,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742247_1423 (size=4188619) 2024-12-16T19:47:24,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742247_1423 (size=4188619) 2024-12-16T19:47:24,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742247_1423 (size=4188619) 2024-12-16T19:47:24,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742248_1424 (size=20406) 2024-12-16T19:47:24,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742248_1424 (size=20406) 2024-12-16T19:47:24,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742248_1424 (size=20406) 2024-12-16T19:47:24,245 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T19:47:24,256 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-16T19:47:24,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742249_1425 (size=75495) 2024-12-16T19:47:24,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742249_1425 (size=75495) 2024-12-16T19:47:24,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742249_1425 (size=75495) 2024-12-16T19:47:24,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742250_1426 (size=45609) 2024-12-16T19:47:24,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742250_1426 (size=45609) 2024-12-16T19:47:24,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742250_1426 (size=45609) 2024-12-16T19:47:24,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742251_1427 (size=110084) 2024-12-16T19:47:24,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742251_1427 (size=110084) 2024-12-16T19:47:24,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742251_1427 (size=110084) 2024-12-16T19:47:24,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742252_1428 (size=1323991) 2024-12-16T19:47:24,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742252_1428 (size=1323991) 2024-12-16T19:47:24,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742252_1428 (size=1323991) 2024-12-16T19:47:24,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742253_1429 (size=23076) 2024-12-16T19:47:24,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742253_1429 (size=23076) 2024-12-16T19:47:24,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742253_1429 (size=23076) 2024-12-16T19:47:24,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742254_1430 (size=126803) 2024-12-16T19:47:24,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742254_1430 (size=126803) 2024-12-16T19:47:24,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742254_1430 (size=126803) 2024-12-16T19:47:24,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742255_1431 (size=322274) 2024-12-16T19:47:24,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742255_1431 (size=322274) 
2024-12-16T19:47:24,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742255_1431 (size=322274) 2024-12-16T19:47:24,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742256_1432 (size=451756) 2024-12-16T19:47:24,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742256_1432 (size=451756) 2024-12-16T19:47:24,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742256_1432 (size=451756) 2024-12-16T19:47:25,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742257_1433 (size=1832290) 2024-12-16T19:47:25,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742257_1433 (size=1832290) 2024-12-16T19:47:25,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742257_1433 (size=1832290) 2024-12-16T19:47:25,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742258_1434 (size=30081) 2024-12-16T19:47:25,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742258_1434 (size=30081) 2024-12-16T19:47:25,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742258_1434 (size=30081) 2024-12-16T19:47:25,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742259_1435 (size=53616) 2024-12-16T19:47:25,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742259_1435 (size=53616) 2024-12-16T19:47:25,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742259_1435 (size=53616) 2024-12-16T19:47:25,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742260_1436 (size=29229) 2024-12-16T19:47:25,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742260_1436 (size=29229) 2024-12-16T19:47:25,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742260_1436 (size=29229) 2024-12-16T19:47:25,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742261_1437 (size=169089) 2024-12-16T19:47:25,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742261_1437 (size=169089) 2024-12-16T19:47:25,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742261_1437 (size=169089) 2024-12-16T19:47:25,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742262_1438 (size=6350922) 
2024-12-16T19:47:25,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742262_1438 (size=6350922) 2024-12-16T19:47:25,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742262_1438 (size=6350922) 2024-12-16T19:47:25,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742263_1439 (size=5175431) 2024-12-16T19:47:25,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742263_1439 (size=5175431) 2024-12-16T19:47:25,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742263_1439 (size=5175431) 2024-12-16T19:47:25,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742264_1440 (size=136454) 2024-12-16T19:47:25,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742264_1440 (size=136454) 2024-12-16T19:47:25,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742264_1440 (size=136454) 2024-12-16T19:47:25,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742265_1441 (size=907467) 2024-12-16T19:47:25,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742265_1441 (size=907467) 2024-12-16T19:47:25,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742265_1441 (size=907467) 2024-12-16T19:47:25,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742266_1442 (size=3317408) 2024-12-16T19:47:25,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742266_1442 (size=3317408) 2024-12-16T19:47:25,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742266_1442 (size=3317408) 2024-12-16T19:47:25,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742267_1443 (size=503880) 2024-12-16T19:47:25,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742267_1443 (size=503880) 2024-12-16T19:47:25,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742267_1443 (size=503880) 2024-12-16T19:47:25,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742268_1444 (size=4695811) 2024-12-16T19:47:25,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742268_1444 (size=4695811) 2024-12-16T19:47:25,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to 
blk_1073742268_1444 (size=4695811) 2024-12-16T19:47:25,551 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-16T19:47:25,553 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-16T19:47:25,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742269_1445 (size=7) 2024-12-16T19:47:25,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742269_1445 (size=7) 2024-12-16T19:47:25,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742269_1445 (size=7) 2024-12-16T19:47:25,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742270_1446 (size=10) 2024-12-16T19:47:25,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742270_1446 (size=10) 2024-12-16T19:47:25,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742270_1446 (size=10) 2024-12-16T19:47:25,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742271_1447 (size=304782) 2024-12-16T19:47:25,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742271_1447 (size=304782) 2024-12-16T19:47:25,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742271_1447 (size=304782) 2024-12-16T19:47:25,595 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T19:47:25,595 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-16T19:47:25,753 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0007_000001 (auth:SIMPLE) from 127.0.0.1:36190 2024-12-16T19:47:26,039 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-16T19:47:26,039 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-16T19:47:26,040 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-16T19:47:31,542 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T19:47:31,941 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0007_000001 (auth:SIMPLE) from 127.0.0.1:57324 2024-12-16T19:47:32,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742272_1448 (size=350432) 2024-12-16T19:47:32,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742272_1448 (size=350432) 2024-12-16T19:47:32,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742272_1448 (size=350432) 2024-12-16T19:47:33,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742273_1449 (size=8568) 2024-12-16T19:47:33,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742273_1449 (size=8568) 2024-12-16T19:47:33,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742273_1449 (size=8568) 2024-12-16T19:47:33,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742274_1450 (size=460) 2024-12-16T19:47:33,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742274_1450 (size=460) 2024-12-16T19:47:33,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742274_1450 (size=460) 2024-12-16T19:47:33,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742275_1451 (size=8568) 2024-12-16T19:47:33,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742275_1451 (size=8568) 2024-12-16T19:47:33,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742275_1451 (size=8568) 2024-12-16T19:47:33,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39241 is added to blk_1073742276_1452 (size=350432) 2024-12-16T19:47:33,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742276_1452 (size=350432) 2024-12-16T19:47:33,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742276_1452 (size=350432) 2024-12-16T19:47:34,916 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-16T19:47:34,918 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-16T19:47:34,939 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:34,939 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-16T19:47:34,940 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-16T19:47:34,941 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:34,943 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-16T19:47:34,943 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-16T19:47:34,943 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378442714/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378442714/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:34,945 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378442714/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-16T19:47:34,945 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378442714/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-16T19:47:34,958 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testEmptyExportFileSystemState 2024-12-16T19:47:34,959 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-16T19:47:34,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-16T19:47:34,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-16T19:47:34,966 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378454965"}]},"ts":"1734378454965"} 2024-12-16T19:47:34,967 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-16T19:47:35,005 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-16T19:47:35,007 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-16T19:47:35,012 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4f9df011687dc0e0feacf03cbc677973, UNASSIGN}, {pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b86b582edae56e07d8340b1ef1f3a708, UNASSIGN}] 2024-12-16T19:47:35,014 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b86b582edae56e07d8340b1ef1f3a708, UNASSIGN 2024-12-16T19:47:35,015 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4f9df011687dc0e0feacf03cbc677973, UNASSIGN 2024-12-16T19:47:35,015 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=b86b582edae56e07d8340b1ef1f3a708, regionState=CLOSING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:35,016 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=4f9df011687dc0e0feacf03cbc677973, regionState=CLOSING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:47:35,016 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:47:35,017 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=177, ppid=176, state=RUNNABLE; CloseRegionProcedure b86b582edae56e07d8340b1ef1f3a708, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:47:35,018 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:47:35,018 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=175, state=RUNNABLE; CloseRegionProcedure 4f9df011687dc0e0feacf03cbc677973, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:47:35,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-16T19:47:35,171 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:35,171 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:47:35,172 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(124): Close b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:35,172 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(124): Close 4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:35,172 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:47:35,172 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:47:35,172 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1681): Closing 4f9df011687dc0e0feacf03cbc677973, disabling compactions & flushes 2024-12-16T19:47:35,172 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. 2024-12-16T19:47:35,172 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. 2024-12-16T19:47:35,172 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. after waiting 0 ms 2024-12-16T19:47:35,172 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. 2024-12-16T19:47:35,172 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1681): Closing b86b582edae56e07d8340b1ef1f3a708, disabling compactions & flushes 2024-12-16T19:47:35,172 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. 2024-12-16T19:47:35,172 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. 2024-12-16T19:47:35,172 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. 
after waiting 0 ms 2024-12-16T19:47:35,172 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. 2024-12-16T19:47:35,182 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:47:35,183 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:47:35,183 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973. 2024-12-16T19:47:35,183 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1635): Region close journal for 4f9df011687dc0e0feacf03cbc677973: 2024-12-16T19:47:35,185 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(170): Closed 4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:35,185 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=4f9df011687dc0e0feacf03cbc677973, regionState=CLOSED 2024-12-16T19:47:35,188 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=175 2024-12-16T19:47:35,188 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=175, state=SUCCESS; CloseRegionProcedure 4f9df011687dc0e0feacf03cbc677973, server=7a6e4bf70377,42741,1734378236851 in 168 msec 2024-12-16T19:47:35,189 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=4f9df011687dc0e0feacf03cbc677973, UNASSIGN in 176 msec 2024-12-16T19:47:35,189 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:47:35,190 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:47:35,190 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708. 
2024-12-16T19:47:35,190 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1635): Region close journal for b86b582edae56e07d8340b1ef1f3a708: 2024-12-16T19:47:35,192 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(170): Closed b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:35,192 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=b86b582edae56e07d8340b1ef1f3a708, regionState=CLOSED 2024-12-16T19:47:35,195 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=177, resume processing ppid=176 2024-12-16T19:47:35,195 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, ppid=176, state=SUCCESS; CloseRegionProcedure b86b582edae56e07d8340b1ef1f3a708, server=7a6e4bf70377,42861,1734378237024 in 176 msec 2024-12-16T19:47:35,196 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=174 2024-12-16T19:47:35,196 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=b86b582edae56e07d8340b1ef1f3a708, UNASSIGN in 183 msec 2024-12-16T19:47:35,199 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-16T19:47:35,199 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 190 msec 2024-12-16T19:47:35,200 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378455200"}]},"ts":"1734378455200"} 2024-12-16T19:47:35,201 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-16T19:47:35,214 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-16T19:47:35,216 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 256 msec 2024-12-16T19:47:35,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-16T19:47:35,265 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 173 completed 2024-12-16T19:47:35,266 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-16T19:47:35,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-16T19:47:35,267 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-16T19:47:35,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] 
access.PermissionStorage(259): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-16T19:47:35,268 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=179, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-16T19:47:35,269 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-16T19:47:35,271 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:35,271 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:35,272 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973/recovered.edits] 2024-12-16T19:47:35,272 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708/recovered.edits] 2024-12-16T19:47:35,276 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973/cf/95049c464e7f45ef83446bfb17a3c1b2 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973/cf/95049c464e7f45ef83446bfb17a3c1b2 2024-12-16T19:47:35,276 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708/cf/3cc0d651249c4053963a04c629d1240d to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708/cf/3cc0d651249c4053963a04c629d1240d 2024-12-16T19:47:35,279 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973/recovered.edits/9.seqid to 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973/recovered.edits/9.seqid 2024-12-16T19:47:35,280 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708/recovered.edits/9.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708/recovered.edits/9.seqid 2024-12-16T19:47:35,280 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:35,280 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testEmptyExportFileSystemState/4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:35,280 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-16T19:47:35,281 DEBUG [PEWorker-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-12-16T19:47:35,282 DEBUG [PEWorker-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf] 2024-12-16T19:47:35,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-16T19:47:35,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-16T19:47:35,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-16T19:47:35,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-16T19:47:35,287 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-16T19:47:35,287 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241216fdc4b002d3e640779d73ad23d473d0db_b86b582edae56e07d8340b1ef1f3a708 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241216fdc4b002d3e640779d73ad23d473d0db_b86b582edae56e07d8340b1ef1f3a708 2024-12-16T19:47:35,287 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-16T19:47:35,287 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241216d0d08b206578407e80f0fd4e03e5f49e_4f9df011687dc0e0feacf03cbc677973 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241216d0d08b206578407e80f0fd4e03e5f49e_4f9df011687dc0e0feacf03cbc677973 2024-12-16T19:47:35,287 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-16T19:47:35,289 DEBUG [PEWorker-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-12-16T19:47:35,291 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=179, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-16T19:47:35,294 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-16T19:47:35,296 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 
2024-12-16T19:47:35,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-16T19:47:35,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:35,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-16T19:47:35,297 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data null 2024-12-16T19:47:35,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-16T19:47:35,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:35,297 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=179, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-16T19:47:35,297 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-16T19:47:35,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:35,297 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:35,297 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testEmptyExportFileSystemState' from region states. 
2024-12-16T19:47:35,297 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378455297"}]},"ts":"9223372036854775807"} 2024-12-16T19:47:35,297 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378455297"}]},"ts":"9223372036854775807"} 2024-12-16T19:47:35,298 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:35,298 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:35,299 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:35,299 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:35,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-16T19:47:35,301 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-16T19:47:35,301 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 4f9df011687dc0e0feacf03cbc677973, NAME => 'testtb-testEmptyExportFileSystemState,,1734378439634.4f9df011687dc0e0feacf03cbc677973.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => b86b582edae56e07d8340b1ef1f3a708, NAME => 'testtb-testEmptyExportFileSystemState,1,1734378439634.b86b582edae56e07d8340b1ef1f3a708.', STARTKEY => '1', ENDKEY => ''}] 2024-12-16T19:47:35,301 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
2024-12-16T19:47:35,301 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734378455301"}]},"ts":"9223372036854775807"} 2024-12-16T19:47:35,303 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-16T19:47:35,312 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=179, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-16T19:47:35,313 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 46 msec 2024-12-16T19:47:35,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-16T19:47:35,400 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 179 completed 2024-12-16T19:47:35,405 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" 2024-12-16T19:47:35,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:35,408 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" 2024-12-16T19:47:35,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-16T19:47:35,430 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=810 (was 798) Potentially hanging thread: process reaper (pid 73752) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39435 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1422999152) connection to localhost/127.0.0.1:37371 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client 
DFSClient_NONMAPREDUCE_-1027860295_1 at /127.0.0.1:54676 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1027860295_1 at /127.0.0.1:49770 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37371 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-40 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-41 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:47772 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6133 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Client (1422999152) connection to 
localhost/127.0.0.1:39435 from appattempt_1734378249643_0007_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:57904 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-43 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-42 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:53172 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=823 (was 797) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=797 (was 875), ProcessCount=17 (was 14) - ProcessCount LEAK? 
-, AvailableMemoryMB=1000 (was 2326) 2024-12-16T19:47:35,430 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=810 is superior to 500 2024-12-16T19:47:35,454 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=810, OpenFileDescriptor=823, MaxFileDescriptor=1048576, SystemLoadAverage=797, ProcessCount=17, AvailableMemoryMB=999 2024-12-16T19:47:35,454 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=810 is superior to 500 2024-12-16T19:47:35,457 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T19:47:35,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-16T19:47:35,459 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T19:47:35,460 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 180 2024-12-16T19:47:35,461 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T19:47:35,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-16T19:47:35,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742277_1453 (size=440) 2024-12-16T19:47:35,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742277_1453 (size=440) 2024-12-16T19:47:35,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742277_1453 (size=440) 2024-12-16T19:47:35,487 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 89cd6ef578a4222ebe98f2a62b571ba1, NAME => 'testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:47:35,487 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 999ca5763293d4e8a35303514e7f9365, NAME => 'testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:47:35,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-16T19:47:35,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742278_1454 (size=65) 2024-12-16T19:47:35,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742278_1454 (size=65) 2024-12-16T19:47:35,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742278_1454 (size=65) 2024-12-16T19:47:35,575 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:47:35,576 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1681): Closing 999ca5763293d4e8a35303514e7f9365, disabling compactions & flushes 2024-12-16T19:47:35,576 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. 2024-12-16T19:47:35,576 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. 2024-12-16T19:47:35,576 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. after waiting 0 ms 2024-12-16T19:47:35,576 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. 2024-12-16T19:47:35,576 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. 
2024-12-16T19:47:35,576 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1635): Region close journal for 999ca5763293d4e8a35303514e7f9365: 2024-12-16T19:47:35,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742279_1455 (size=65) 2024-12-16T19:47:35,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742279_1455 (size=65) 2024-12-16T19:47:35,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742279_1455 (size=65) 2024-12-16T19:47:35,584 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:47:35,584 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1681): Closing 89cd6ef578a4222ebe98f2a62b571ba1, disabling compactions & flushes 2024-12-16T19:47:35,584 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. 2024-12-16T19:47:35,584 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. 2024-12-16T19:47:35,584 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. after waiting 0 ms 2024-12-16T19:47:35,584 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. 2024-12-16T19:47:35,584 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. 
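[Editor's note] The entries above show the master executing CreateTableProcedure pid=180 for 'testtb-testExportWithChecksum' with a MOB-enabled family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and two regions split at key '1'. As a hedged, illustrative sketch only (not code taken from the test itself), the client-side request that produces such a procedure would look roughly like the following with the stock HBase 2.x client API; the configuration handle and class name are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateChecksumExportTable {          // illustrative name, not from the test
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml on the classpath
    TableName table = TableName.valueOf("testtb-testExportWithChecksum");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Family 'cf' with MOB enabled and threshold 0, mirroring the descriptor logged above.
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0L)
              .setMaxVersions(1)
              .build());
      // A single split key '1' yields the two regions seen in the log: [,1) and [1,).
      admin.createTable(td.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}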
2024-12-16T19:47:35,584 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1635): Region close journal for 89cd6ef578a4222ebe98f2a62b571ba1: 2024-12-16T19:47:35,586 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T19:47:35,586 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734378455586"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378455586"}]},"ts":"1734378455586"} 2024-12-16T19:47:35,586 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1734378455586"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378455586"}]},"ts":"1734378455586"} 2024-12-16T19:47:35,588 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T19:47:35,589 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T19:47:35,589 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378455589"}]},"ts":"1734378455589"} 2024-12-16T19:47:35,591 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-16T19:47:35,605 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {7a6e4bf70377=0} racks are {/default-rack=0} 2024-12-16T19:47:35,607 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T19:47:35,607 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T19:47:35,607 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T19:47:35,607 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T19:47:35,607 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T19:47:35,607 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T19:47:35,607 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T19:47:35,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=999ca5763293d4e8a35303514e7f9365, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=89cd6ef578a4222ebe98f2a62b571ba1, ASSIGN}] 2024-12-16T19:47:35,608 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure 
table=testtb-testExportWithChecksum, region=999ca5763293d4e8a35303514e7f9365, ASSIGN 2024-12-16T19:47:35,609 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=89cd6ef578a4222ebe98f2a62b571ba1, ASSIGN 2024-12-16T19:47:35,610 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=999ca5763293d4e8a35303514e7f9365, ASSIGN; state=OFFLINE, location=7a6e4bf70377,45217,1734378236605; forceNewPlan=false, retain=false 2024-12-16T19:47:35,610 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=89cd6ef578a4222ebe98f2a62b571ba1, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42861,1734378237024; forceNewPlan=false, retain=false 2024-12-16T19:47:35,760 INFO [7a6e4bf70377:45883 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T19:47:35,761 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=999ca5763293d4e8a35303514e7f9365, regionState=OPENING, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:47:35,761 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=89cd6ef578a4222ebe98f2a62b571ba1, regionState=OPENING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:35,763 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE; OpenRegionProcedure 89cd6ef578a4222ebe98f2a62b571ba1, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:47:35,764 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=181, state=RUNNABLE; OpenRegionProcedure 999ca5763293d4e8a35303514e7f9365, server=7a6e4bf70377,45217,1734378236605}] 2024-12-16T19:47:35,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-16T19:47:35,916 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:35,917 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:47:35,922 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. 2024-12-16T19:47:35,922 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. 
2024-12-16T19:47:35,923 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7285): Opening region: {ENCODED => 999ca5763293d4e8a35303514e7f9365, NAME => 'testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T19:47:35,923 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7285): Opening region: {ENCODED => 89cd6ef578a4222ebe98f2a62b571ba1, NAME => 'testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T19:47:35,923 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. service=AccessControlService 2024-12-16T19:47:35,923 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. service=AccessControlService 2024-12-16T19:47:35,923 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:47:35,923 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:47:35,924 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 999ca5763293d4e8a35303514e7f9365 2024-12-16T19:47:35,924 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:47:35,924 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:47:35,924 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:47:35,924 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7327): checking encryption for 89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:47:35,924 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7327): checking encryption for 999ca5763293d4e8a35303514e7f9365 2024-12-16T19:47:35,924 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7330): 
checking classloading for 89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:47:35,924 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7330): checking classloading for 999ca5763293d4e8a35303514e7f9365 2024-12-16T19:47:35,927 INFO [StoreOpener-89cd6ef578a4222ebe98f2a62b571ba1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:47:35,927 INFO [StoreOpener-999ca5763293d4e8a35303514e7f9365-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 999ca5763293d4e8a35303514e7f9365 2024-12-16T19:47:35,928 INFO [StoreOpener-89cd6ef578a4222ebe98f2a62b571ba1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 89cd6ef578a4222ebe98f2a62b571ba1 columnFamilyName cf 2024-12-16T19:47:35,928 INFO [StoreOpener-999ca5763293d4e8a35303514e7f9365-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 999ca5763293d4e8a35303514e7f9365 columnFamilyName cf 2024-12-16T19:47:35,929 DEBUG [StoreOpener-89cd6ef578a4222ebe98f2a62b571ba1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:35,929 DEBUG [StoreOpener-999ca5763293d4e8a35303514e7f9365-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:35,929 INFO [StoreOpener-89cd6ef578a4222ebe98f2a62b571ba1-1 {}] regionserver.HStore(327): Store=89cd6ef578a4222ebe98f2a62b571ba1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:47:35,929 INFO [StoreOpener-999ca5763293d4e8a35303514e7f9365-1 {}] regionserver.HStore(327): Store=999ca5763293d4e8a35303514e7f9365/cf, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:47:35,930 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365 2024-12-16T19:47:35,930 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:47:35,930 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365 2024-12-16T19:47:35,930 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:47:35,932 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1085): writing seq id for 999ca5763293d4e8a35303514e7f9365 2024-12-16T19:47:35,932 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1085): writing seq id for 89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:47:35,933 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:47:35,933 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:47:35,933 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1102): Opened 999ca5763293d4e8a35303514e7f9365; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72068833, jitterRate=0.07390929758548737}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:47:35,933 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1102): Opened 89cd6ef578a4222ebe98f2a62b571ba1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65657370, jitterRate=-0.02162894606590271}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:47:35,934 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] 
regionserver.HRegion(1001): Region open journal for 89cd6ef578a4222ebe98f2a62b571ba1: 2024-12-16T19:47:35,934 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1001): Region open journal for 999ca5763293d4e8a35303514e7f9365: 2024-12-16T19:47:35,934 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365., pid=184, masterSystemTime=1734378455917 2024-12-16T19:47:35,934 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1., pid=183, masterSystemTime=1734378455916 2024-12-16T19:47:35,935 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. 2024-12-16T19:47:35,935 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. 2024-12-16T19:47:35,936 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=89cd6ef578a4222ebe98f2a62b571ba1, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:35,936 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. 2024-12-16T19:47:35,936 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. 
2024-12-16T19:47:35,936 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=999ca5763293d4e8a35303514e7f9365, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:47:35,939 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=183, resume processing ppid=182 2024-12-16T19:47:35,939 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, ppid=182, state=SUCCESS; OpenRegionProcedure 89cd6ef578a4222ebe98f2a62b571ba1, server=7a6e4bf70377,42861,1734378237024 in 174 msec 2024-12-16T19:47:35,939 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=181 2024-12-16T19:47:35,939 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=181, state=SUCCESS; OpenRegionProcedure 999ca5763293d4e8a35303514e7f9365, server=7a6e4bf70377,45217,1734378236605 in 174 msec 2024-12-16T19:47:35,939 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=89cd6ef578a4222ebe98f2a62b571ba1, ASSIGN in 332 msec 2024-12-16T19:47:35,940 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=181, resume processing ppid=180 2024-12-16T19:47:35,940 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=999ca5763293d4e8a35303514e7f9365, ASSIGN in 332 msec 2024-12-16T19:47:35,940 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T19:47:35,941 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378455940"}]},"ts":"1734378455940"} 2024-12-16T19:47:35,942 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-16T19:47:35,979 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T19:47:35,979 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-16T19:47:35,980 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-16T19:47:35,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:35,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:35,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:35,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:47:35,998 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:35,998 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:35,998 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:35,998 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:35,998 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:35,998 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:35,998 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:35,998 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-16T19:47:35,999 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithChecksum in 540 msec 2024-12-16T19:47:36,039 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-16T19:47:36,039 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-16T19:47:36,040 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-16T19:47:36,067 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-16T19:47:36,067 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum, procId: 180 completed 2024-12-16T19:47:36,070 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithChecksum 2024-12-16T19:47:36,070 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. 2024-12-16T19:47:36,070 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:47:36,091 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-16T19:47:36,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378456091 (current time:1734378456091). 2024-12-16T19:47:36,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:47:36,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-16T19:47:36,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:47:36,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2152efbe to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b7556df 2024-12-16T19:47:36,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10b340c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:47:36,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:36,106 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58700, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:36,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2152efbe to 127.0.0.1:62515 2024-12-16T19:47:36,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:47:36,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7d65438d to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c773150 2024-12-16T19:47:36,123 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2648e973, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:47:36,124 DEBUG [hconnection-0xece310b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:36,125 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58706, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:36,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:36,127 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39398, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:36,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7d65438d to 127.0.0.1:62515 2024-12-16T19:47:36,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:47:36,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-16T19:47:36,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-16T19:47:36,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-16T19:47:36,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185 2024-12-16T19:47:36,130 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:47:36,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-16T19:47:36,130 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:47:36,132 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:47:36,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742280_1456 (size=161) 2024-12-16T19:47:36,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742280_1456 (size=161) 2024-12-16T19:47:36,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742280_1456 (size=161) 2024-12-16T19:47:36,138 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:47:36,138 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 999ca5763293d4e8a35303514e7f9365}, {pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 89cd6ef578a4222ebe98f2a62b571ba1}] 2024-12-16T19:47:36,139 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 999ca5763293d4e8a35303514e7f9365 2024-12-16T19:47:36,139 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:47:36,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=185 2024-12-16T19:47:36,290 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:36,290 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:47:36,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-16T19:47:36,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45217 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-16T19:47:36,291 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. 2024-12-16T19:47:36,291 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. 2024-12-16T19:47:36,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2538): Flush status journal for 89cd6ef578a4222ebe98f2a62b571ba1: 2024-12-16T19:47:36,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2538): Flush status journal for 999ca5763293d4e8a35303514e7f9365: 2024-12-16T19:47:36,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. for emptySnaptb0-testExportWithChecksum completed. 2024-12-16T19:47:36,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. for emptySnaptb0-testExportWithChecksum completed. 2024-12-16T19:47:36,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-16T19:47:36,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-16T19:47:36,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:47:36,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:47:36,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T19:47:36,292 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T19:47:36,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742281_1457 (size=68) 2024-12-16T19:47:36,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742282_1458 (size=68) 2024-12-16T19:47:36,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742282_1458 (size=68) 2024-12-16T19:47:36,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742281_1457 (size=68) 2024-12-16T19:47:36,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742282_1458 (size=68) 2024-12-16T19:47:36,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742281_1457 (size=68) 2024-12-16T19:47:36,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. 2024-12-16T19:47:36,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-16T19:47:36,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. 
2024-12-16T19:47:36,303 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-16T19:47:36,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=187 2024-12-16T19:47:36,304 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:47:36,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=186 2024-12-16T19:47:36,304 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 999ca5763293d4e8a35303514e7f9365 2024-12-16T19:47:36,304 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:47:36,304 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 999ca5763293d4e8a35303514e7f9365 2024-12-16T19:47:36,306 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, ppid=185, state=SUCCESS; SnapshotRegionProcedure 89cd6ef578a4222ebe98f2a62b571ba1 in 167 msec 2024-12-16T19:47:36,306 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=186, resume processing ppid=185 2024-12-16T19:47:36,306 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; SnapshotRegionProcedure 999ca5763293d4e8a35303514e7f9365 in 167 msec 2024-12-16T19:47:36,306 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:47:36,307 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:47:36,307 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-16T19:47:36,308 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:47:36,308 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:36,308 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-16T19:47:36,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742283_1459 (size=60) 2024-12-16T19:47:36,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742283_1459 (size=60) 2024-12-16T19:47:36,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742283_1459 (size=60) 2024-12-16T19:47:36,314 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:47:36,314 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-16T19:47:36,315 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-16T19:47:36,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742284_1460 (size=641) 2024-12-16T19:47:36,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742284_1460 (size=641) 2024-12-16T19:47:36,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742284_1460 (size=641) 2024-12-16T19:47:36,326 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:47:36,330 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:47:36,330 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-16T19:47:36,332 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:47:36,332 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185 2024-12-16T19:47:36,333 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 204 msec 2024-12-16T19:47:36,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-16T19:47:36,433 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 185 completed 2024-12-16T19:47:36,436 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45217 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:47:36,437 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42861 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:47:36,441 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithChecksum 2024-12-16T19:47:36,441 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. 2024-12-16T19:47:36,441 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:47:36,451 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-16T19:47:36,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378456451 (current time:1734378456451). 
2024-12-16T19:47:36,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:47:36,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-16T19:47:36,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:47:36,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6be5c087 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f8f9349 2024-12-16T19:47:36,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14eb50f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:47:36,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:36,464 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58714, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:36,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6be5c087 to 127.0.0.1:62515 2024-12-16T19:47:36,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:47:36,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x412e56ba to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22d8800d 2024-12-16T19:47:36,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cfa783e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:47:36,482 DEBUG [hconnection-0x3a5cfa0a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:36,483 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58716, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:36,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:47:36,485 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39402, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:47:36,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 
0x412e56ba to 127.0.0.1:62515 2024-12-16T19:47:36,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:47:36,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-16T19:47:36,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T19:47:36,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=188, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-16T19:47:36,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188 2024-12-16T19:47:36,489 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:47:36,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-16T19:47:36,490 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:47:36,492 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:47:36,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742285_1461 (size=156) 2024-12-16T19:47:36,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742285_1461 (size=156) 2024-12-16T19:47:36,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742285_1461 (size=156) 2024-12-16T19:47:36,500 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:47:36,500 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 999ca5763293d4e8a35303514e7f9365}, {pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 89cd6ef578a4222ebe98f2a62b571ba1}] 2024-12-16T19:47:36,501 
INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:47:36,501 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 999ca5763293d4e8a35303514e7f9365 2024-12-16T19:47:36,501 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-16T19:47:36,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-16T19:47:36,653 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:47:36,654 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:47:36,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=190 2024-12-16T19:47:36,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45217 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=189 2024-12-16T19:47:36,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. 2024-12-16T19:47:36,655 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. 
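Each SnapshotRegionProcedure dispatched above starts by flushing its region's memstore (the "Flushing ... 1/1 column families" lines that follow immediately). The same flush can be requested directly through the client API; a small sketch under the same assumptions as above, using the table name from the log purely for illustration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Forces the memstore contents of every region of the table to be
          // written out as HFiles, analogous to the per-region flush the
          // snapshot procedure performs before referencing store files.
          admin.flush(TableName.valueOf("testtb-testExportWithChecksum"));
        }
      }
    }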
2024-12-16T19:47:36,655 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2837): Flushing 999ca5763293d4e8a35303514e7f9365 1/1 column families, dataSize=534 B heapSize=1.38 KB 2024-12-16T19:47:36,655 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2837): Flushing 89cd6ef578a4222ebe98f2a62b571ba1 1/1 column families, dataSize=2.74 KB heapSize=6.16 KB 2024-12-16T19:47:36,671 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216c4bfbe99592e498f8623d824768f3a87_999ca5763293d4e8a35303514e7f9365 is 71, key is 02106623bbf69862a4c35d9b44b5c9a6/cf:q/1734378456436/Put/seqid=0 2024-12-16T19:47:36,671 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024121630f2a8256d464e488ebd6471f20ca9c7_89cd6ef578a4222ebe98f2a62b571ba1 is 71, key is 10abb2b84fb4aa591c44a4ebde50f4cb/cf:q/1734378456437/Put/seqid=0 2024-12-16T19:47:36,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742286_1462 (size=5451) 2024-12-16T19:47:36,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742286_1462 (size=5451) 2024-12-16T19:47:36,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742287_1463 (size=7821) 2024-12-16T19:47:36,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742286_1462 (size=5451) 2024-12-16T19:47:36,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742287_1463 (size=7821) 2024-12-16T19:47:36,680 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:36,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742287_1463 (size=7821) 2024-12-16T19:47:36,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:36,684 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b2024121630f2a8256d464e488ebd6471f20ca9c7_89cd6ef578a4222ebe98f2a62b571ba1 to 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b2024121630f2a8256d464e488ebd6471f20ca9c7_89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:47:36,684 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241216c4bfbe99592e498f8623d824768f3a87_999ca5763293d4e8a35303514e7f9365 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241216c4bfbe99592e498f8623d824768f3a87_999ca5763293d4e8a35303514e7f9365 2024-12-16T19:47:36,684 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/.tmp/cf/6056ec3d3fb745a69748dedf1cf7d15d, store: [table=testtb-testExportWithChecksum family=cf region=89cd6ef578a4222ebe98f2a62b571ba1] 2024-12-16T19:47:36,684 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365/.tmp/cf/6690194a73264b16846442da53e1caaa, store: [table=testtb-testExportWithChecksum family=cf region=999ca5763293d4e8a35303514e7f9365] 2024-12-16T19:47:36,685 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/.tmp/cf/6056ec3d3fb745a69748dedf1cf7d15d is 206, key is 1ae84704c5952a84f47058b5d8377715d/cf:q/1734378456437/Put/seqid=0 2024-12-16T19:47:36,685 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365/.tmp/cf/6690194a73264b16846442da53e1caaa is 206, key is 02cab87b89df77ade08749ff2110c71a5/cf:q/1734378456436/Put/seqid=0 2024-12-16T19:47:36,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742288_1464 (size=13843) 2024-12-16T19:47:36,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742288_1464 (size=13843) 2024-12-16T19:47:36,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742289_1465 (size=6922) 2024-12-16T19:47:36,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742289_1465 (size=6922) 2024-12-16T19:47:36,690 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742289_1465 (size=6922) 2024-12-16T19:47:36,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742288_1464 (size=13843) 2024-12-16T19:47:36,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-16T19:47:37,090 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=534, hasBloomFilter=true, into tmp file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365/.tmp/cf/6690194a73264b16846442da53e1caaa 2024-12-16T19:47:37,091 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/.tmp/cf/6056ec3d3fb745a69748dedf1cf7d15d 2024-12-16T19:47:37,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-16T19:47:37,094 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365/.tmp/cf/6690194a73264b16846442da53e1caaa as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365/cf/6690194a73264b16846442da53e1caaa 2024-12-16T19:47:37,095 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/.tmp/cf/6056ec3d3fb745a69748dedf1cf7d15d as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/cf/6056ec3d3fb745a69748dedf1cf7d15d 2024-12-16T19:47:37,098 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365/cf/6690194a73264b16846442da53e1caaa, entries=8, sequenceid=6, filesize=6.8 K 2024-12-16T19:47:37,098 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/cf/6056ec3d3fb745a69748dedf1cf7d15d, entries=42, sequenceid=6, filesize=13.5 K 2024-12-16T19:47:37,099 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(3040): Finished flush of dataSize ~534 B/534, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 999ca5763293d4e8a35303514e7f9365 in 444ms, sequenceid=6, compaction requested=false 2024-12-16T19:47:37,099 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(3040): Finished flush of dataSize ~2.74 KB/2802, heapSize ~6.14 KB/6288, currentSize=0 B/0 for 89cd6ef578a4222ebe98f2a62b571ba1 in 444ms, sequenceid=6, compaction requested=false 2024-12-16T19:47:37,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2538): Flush status journal for 999ca5763293d4e8a35303514e7f9365: 2024-12-16T19:47:37,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2538): Flush status journal for 89cd6ef578a4222ebe98f2a62b571ba1: 2024-12-16T19:47:37,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. for snaptb0-testExportWithChecksum completed. 2024-12-16T19:47:37,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. for snaptb0-testExportWithChecksum completed. 2024-12-16T19:47:37,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-16T19:47:37,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-16T19:47:37,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:47:37,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:47:37,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/cf/6056ec3d3fb745a69748dedf1cf7d15d] hfiles 2024-12-16T19:47:37,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365/cf/6690194a73264b16846442da53e1caaa] hfiles 2024-12-16T19:47:37,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/cf/6056ec3d3fb745a69748dedf1cf7d15d for snapshot=snaptb0-testExportWithChecksum 2024-12-16T19:47:37,099 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365/cf/6690194a73264b16846442da53e1caaa for snapshot=snaptb0-testExportWithChecksum 2024-12-16T19:47:37,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742290_1466 (size=107) 2024-12-16T19:47:37,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742290_1466 (size=107) 2024-12-16T19:47:37,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742290_1466 (size=107) 2024-12-16T19:47:37,111 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. 
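After the per-region work closes out here, the snapshot is consolidated, verified and completed in the lines that follow, and TestExportSnapshot then exports snaptb0-testExportWithChecksum to a local file:// destination (see the "Local export destination path" and ExportSnapshot lines further down). A hedged sketch of driving that tool programmatically; the destination path is a placeholder and the exact options the test passes are not shown in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotLocally {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // ExportSnapshot copies the snapshot manifest plus the referenced
        // store files to another FileSystem (here a local directory,
        // placeholder path), running a MapReduce job for the file copies —
        // the jar-resolution and job-setup lines below are part of that job.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",
            "-copy-to", "file:///tmp/local-export"  // placeholder destination
        });
        System.exit(rc);
      }
    }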
2024-12-16T19:47:37,112 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=189 2024-12-16T19:47:37,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=189 2024-12-16T19:47:37,112 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 999ca5763293d4e8a35303514e7f9365 2024-12-16T19:47:37,112 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 999ca5763293d4e8a35303514e7f9365 2024-12-16T19:47:37,113 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=189, ppid=188, state=SUCCESS; SnapshotRegionProcedure 999ca5763293d4e8a35303514e7f9365 in 613 msec 2024-12-16T19:47:37,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742291_1467 (size=107) 2024-12-16T19:47:37,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742291_1467 (size=107) 2024-12-16T19:47:37,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742291_1467 (size=107) 2024-12-16T19:47:37,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. 2024-12-16T19:47:37,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=190 2024-12-16T19:47:37,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=190 2024-12-16T19:47:37,120 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:47:37,120 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:47:37,122 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=190, resume processing ppid=188 2024-12-16T19:47:37,122 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:47:37,122 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=190, ppid=188, state=SUCCESS; SnapshotRegionProcedure 89cd6ef578a4222ebe98f2a62b571ba1 in 621 msec 2024-12-16T19:47:37,123 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH 
ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:47:37,124 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-16T19:47:37,124 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:47:37,124 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:47:37,125 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b2024121630f2a8256d464e488ebd6471f20ca9c7_89cd6ef578a4222ebe98f2a62b571ba1, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241216c4bfbe99592e498f8623d824768f3a87_999ca5763293d4e8a35303514e7f9365] hfiles 2024-12-16T19:47:37,125 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b2024121630f2a8256d464e488ebd6471f20ca9c7_89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:47:37,125 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241216c4bfbe99592e498f8623d824768f3a87_999ca5763293d4e8a35303514e7f9365 2024-12-16T19:47:37,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742292_1468 (size=291) 2024-12-16T19:47:37,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742292_1468 (size=291) 2024-12-16T19:47:37,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742292_1468 (size=291) 2024-12-16T19:47:37,131 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:47:37,131 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-16T19:47:37,132 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-16T19:47:37,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742293_1469 (size=951) 2024-12-16T19:47:37,143 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742293_1469 (size=951) 2024-12-16T19:47:37,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742293_1469 (size=951) 2024-12-16T19:47:37,145 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:47:37,150 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:47:37,150 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-16T19:47:37,151 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:47:37,151 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188 2024-12-16T19:47:37,152 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=188, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 664 msec 2024-12-16T19:47:37,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-16T19:47:37,594 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 188 completed 2024-12-16T19:47:37,595 INFO [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378457594 2024-12-16T19:47:37,595 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378457594, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378457594, srcFsUri=hdfs://localhost:38709, srcDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:47:37,625 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): 
inputFs=hdfs://localhost:38709, inputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:47:37,625 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@11dd3e06, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378457594, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378457594/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-16T19:47:37,627 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T19:47:37,635 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378457594/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-16T19:47:37,655 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:37,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:37,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:37,656 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:38,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-10020445453336744056.jar 2024-12-16T19:47:38,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:38,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:38,741 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-15759984271521000991.jar 2024-12-16T19:47:38,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:38,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:38,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:38,742 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:38,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:38,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T19:47:38,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T19:47:38,743 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T19:47:38,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T19:47:38,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T19:47:38,744 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-16T19:47:38,744 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T19:47:38,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T19:47:38,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T19:47:38,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T19:47:38,745 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T19:47:38,747 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T19:47:38,748 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T19:47:38,748 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:47:38,749 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:47:38,749 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:47:38,749 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:47:38,749 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:47:38,750 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:47:38,750 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:47:38,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742294_1470 (size=127628) 2024-12-16T19:47:38,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742294_1470 (size=127628) 2024-12-16T19:47:38,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742294_1470 (size=127628) 2024-12-16T19:47:38,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742295_1471 (size=2172137) 2024-12-16T19:47:38,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742295_1471 (size=2172137) 2024-12-16T19:47:38,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742295_1471 (size=2172137) 2024-12-16T19:47:38,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742296_1472 (size=213228) 2024-12-16T19:47:39,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742296_1472 (size=213228) 2024-12-16T19:47:39,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742296_1472 (size=213228) 2024-12-16T19:47:39,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742297_1473 (size=1877034) 2024-12-16T19:47:39,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742297_1473 (size=1877034) 2024-12-16T19:47:39,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742297_1473 (size=1877034) 2024-12-16T19:47:39,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742298_1474 (size=533455) 2024-12-16T19:47:39,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742298_1474 (size=533455) 2024-12-16T19:47:39,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742298_1474 (size=533455) 2024-12-16T19:47:39,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742299_1475 (size=7280644) 2024-12-16T19:47:39,125 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742299_1475 (size=7280644) 2024-12-16T19:47:39,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742299_1475 (size=7280644) 2024-12-16T19:47:39,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742300_1476 (size=4188619) 2024-12-16T19:47:39,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742300_1476 (size=4188619) 2024-12-16T19:47:39,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742300_1476 (size=4188619) 2024-12-16T19:47:39,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742301_1477 (size=20406) 2024-12-16T19:47:39,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742301_1477 (size=20406) 2024-12-16T19:47:39,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742301_1477 (size=20406) 2024-12-16T19:47:39,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742302_1478 (size=75495) 2024-12-16T19:47:39,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742302_1478 (size=75495) 2024-12-16T19:47:39,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742302_1478 (size=75495) 2024-12-16T19:47:39,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742303_1479 (size=45609) 2024-12-16T19:47:39,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742303_1479 (size=45609) 2024-12-16T19:47:39,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742303_1479 (size=45609) 2024-12-16T19:47:39,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742304_1480 (size=110084) 2024-12-16T19:47:39,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742304_1480 (size=110084) 2024-12-16T19:47:39,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742304_1480 (size=110084) 2024-12-16T19:47:39,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742305_1481 (size=6350922) 2024-12-16T19:47:39,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742305_1481 (size=6350922) 2024-12-16T19:47:39,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742305_1481 (size=6350922) 2024-12-16T19:47:39,279 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742306_1482 (size=1323991) 2024-12-16T19:47:39,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742306_1482 (size=1323991) 2024-12-16T19:47:39,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742306_1482 (size=1323991) 2024-12-16T19:47:39,350 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0007_000001 (auth:SIMPLE) from 127.0.0.1:37994 2024-12-16T19:47:39,362 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_2/usercache/jenkins/appcache/application_1734378249643_0007/container_1734378249643_0007_01_000001/launch_container.sh] 2024-12-16T19:47:39,362 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_2/usercache/jenkins/appcache/application_1734378249643_0007/container_1734378249643_0007_01_000001/container_tokens] 2024-12-16T19:47:39,362 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_2/usercache/jenkins/appcache/application_1734378249643_0007/container_1734378249643_0007_01_000001/sysfs] 2024-12-16T19:47:39,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742307_1483 (size=23076) 2024-12-16T19:47:39,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742307_1483 (size=23076) 2024-12-16T19:47:39,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742307_1483 (size=23076) 2024-12-16T19:47:39,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742308_1484 (size=126803) 2024-12-16T19:47:39,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742308_1484 (size=126803) 2024-12-16T19:47:39,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742308_1484 (size=126803) 2024-12-16T19:47:39,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742309_1485 (size=322274) 2024-12-16T19:47:39,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:40033 is added to blk_1073742309_1485 (size=322274) 2024-12-16T19:47:39,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742309_1485 (size=322274) 2024-12-16T19:47:39,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742310_1486 (size=1832290) 2024-12-16T19:47:39,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742310_1486 (size=1832290) 2024-12-16T19:47:39,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742310_1486 (size=1832290) 2024-12-16T19:47:39,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742311_1487 (size=30081) 2024-12-16T19:47:39,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742311_1487 (size=30081) 2024-12-16T19:47:39,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742311_1487 (size=30081) 2024-12-16T19:47:39,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742312_1488 (size=53616) 2024-12-16T19:47:39,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742312_1488 (size=53616) 2024-12-16T19:47:39,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742312_1488 (size=53616) 2024-12-16T19:47:39,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742313_1489 (size=29229) 2024-12-16T19:47:39,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742313_1489 (size=29229) 2024-12-16T19:47:39,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742313_1489 (size=29229) 2024-12-16T19:47:39,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742314_1490 (size=169089) 2024-12-16T19:47:39,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742314_1490 (size=169089) 2024-12-16T19:47:39,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742314_1490 (size=169089) 2024-12-16T19:47:39,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742315_1491 (size=451756) 2024-12-16T19:47:39,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742315_1491 (size=451756) 2024-12-16T19:47:39,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742315_1491 (size=451756) 2024-12-16T19:47:40,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:40033 is added to blk_1073742316_1492 (size=5175431) 2024-12-16T19:47:40,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742316_1492 (size=5175431) 2024-12-16T19:47:40,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742316_1492 (size=5175431) 2024-12-16T19:47:40,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742317_1493 (size=136454) 2024-12-16T19:47:40,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742317_1493 (size=136454) 2024-12-16T19:47:40,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742317_1493 (size=136454) 2024-12-16T19:47:40,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742318_1494 (size=907467) 2024-12-16T19:47:40,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742318_1494 (size=907467) 2024-12-16T19:47:40,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742318_1494 (size=907467) 2024-12-16T19:47:40,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742319_1495 (size=3317408) 2024-12-16T19:47:40,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742319_1495 (size=3317408) 2024-12-16T19:47:40,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742319_1495 (size=3317408) 2024-12-16T19:47:40,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742320_1496 (size=503880) 2024-12-16T19:47:40,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742320_1496 (size=503880) 2024-12-16T19:47:40,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742320_1496 (size=503880) 2024-12-16T19:47:40,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742321_1497 (size=4695811) 2024-12-16T19:47:40,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742321_1497 (size=4695811) 2024-12-16T19:47:40,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742321_1497 (size=4695811) 2024-12-16T19:47:40,569 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-16T19:47:40,571 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-16T19:47:40,573 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=33.2 K 2024-12-16T19:47:40,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742322_1498 (size=714) 2024-12-16T19:47:40,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742322_1498 (size=714) 2024-12-16T19:47:40,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742322_1498 (size=714) 2024-12-16T19:47:40,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742323_1499 (size=15) 2024-12-16T19:47:40,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742323_1499 (size=15) 2024-12-16T19:47:40,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742323_1499 (size=15) 2024-12-16T19:47:40,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742324_1500 (size=304929) 2024-12-16T19:47:40,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742324_1500 (size=304929) 2024-12-16T19:47:40,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742324_1500 (size=304929) 2024-12-16T19:47:40,627 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T19:47:40,627 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-16T19:47:40,685 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T19:47:40,876 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0008_000001 (auth:SIMPLE) from 127.0.0.1:44322 2024-12-16T19:47:47,686 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0008_000001 (auth:SIMPLE) from 127.0.0.1:36994 2024-12-16T19:47:47,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742325_1501 (size=350603) 2024-12-16T19:47:47,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742325_1501 (size=350603) 2024-12-16T19:47:47,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742325_1501 (size=350603) 2024-12-16T19:47:49,985 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0008_000001 (auth:SIMPLE) from 127.0.0.1:44234 2024-12-16T19:47:52,555 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 9607b295cff8a6b815204957546cba58, had cached 0 bytes from a total of 6284 2024-12-16T19:47:52,555 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 34c062451cb346b64f29e9d2d97ae5fd, had cached 0 bytes from a total of 14267 2024-12-16T19:47:53,715 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_0/usercache/jenkins/appcache/application_1734378249643_0008/container_1734378249643_0008_01_000002/launch_container.sh] 2024-12-16T19:47:53,715 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_0/usercache/jenkins/appcache/application_1734378249643_0008/container_1734378249643_0008_01_000002/container_tokens] 2024-12-16T19:47:53,715 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_0/usercache/jenkins/appcache/application_1734378249643_0008/container_1734378249643_0008_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/cf/6056ec3d3fb745a69748dedf1cf7d15d and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378457594/archive/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/cf/6056ec3d3fb745a69748dedf1cf7d15d. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-16T19:47:54,256 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
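The checksum-mismatch error above spells out its own workarounds: either compare file-level composite CRCs (so an HDFS source and a local-filesystem destination can be checked against each other despite different block sizes) or disable checksum verification. Below is a minimal sketch of re-running the export with those options via the same ToolRunner/ExportSnapshot path the stack trace shows; only -no-checksum-verify and dfs.checksum.combine.mode=COMPOSITE_CRC are taken from the error text itself, while the -snapshot/-copy-to arguments and the local destination path are assumptions for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotRetry {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Option 1 (from the error text): file-level composite-CRC checksums, which stay
    // comparable across filesystems with different block sizes.
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");

    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",   // snapshot name from the log
        "-copy-to", "file:///tmp/local-export"           // hypothetical destination
        // Option 2 (also from the error text): skip verification entirely by adding
        // "-no-checksum-verify" to this argument list.
    });
    System.exit(rc);
  }
}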
2024-12-16T19:47:54,775 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0008_000001 (auth:SIMPLE) from 127.0.0.1:59866 2024-12-16T19:47:59,479 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_0/usercache/jenkins/appcache/application_1734378249643_0008/container_1734378249643_0008_01_000003/launch_container.sh] 2024-12-16T19:47:59,479 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_0/usercache/jenkins/appcache/application_1734378249643_0008/container_1734378249643_0008_01_000003/container_tokens] 2024-12-16T19:47:59,479 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_0/usercache/jenkins/appcache/application_1734378249643_0008/container_1734378249643_0008_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/cf/6056ec3d3fb745a69748dedf1cf7d15d and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378457594/archive/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/cf/6056ec3d3fb745a69748dedf1cf7d15d. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-16T19:48:00,789 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0008_000001 (auth:SIMPLE) from 127.0.0.1:41258 2024-12-16T19:48:04,404 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 34c062451cb346b64f29e9d2d97ae5fd changed from -1.0 to 0.0, refreshing cache 2024-12-16T19:48:04,404 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 89cd6ef578a4222ebe98f2a62b571ba1 changed from -1.0 to 0.0, refreshing cache 2024-12-16T19:48:04,404 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 9607b295cff8a6b815204957546cba58 changed from -1.0 to 0.0, refreshing cache 2024-12-16T19:48:04,404 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 999ca5763293d4e8a35303514e7f9365 changed from -1.0 to 0.0, refreshing cache 2024-12-16T19:48:05,118 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_2/usercache/jenkins/appcache/application_1734378249643_0008/container_1734378249643_0008_01_000004/launch_container.sh] 2024-12-16T19:48:05,118 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_2/usercache/jenkins/appcache/application_1734378249643_0008/container_1734378249643_0008_01_000004/container_tokens] 2024-12-16T19:48:05,118 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_2/usercache/jenkins/appcache/application_1734378249643_0008/container_1734378249643_0008_01_000004/sysfs] Error: java.io.IOException: Checksum mismatch between 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/cf/6056ec3d3fb745a69748dedf1cf7d15d and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/local-export-1734378457594/archive/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/cf/6056ec3d3fb745a69748dedf1cf7d15d. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-16T19:48:06,834 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0008_000001 (auth:SIMPLE) from 127.0.0.1:55782 2024-12-16T19:48:12,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742326_1502 (size=21340) 2024-12-16T19:48:12,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742326_1502 (size=21340) 2024-12-16T19:48:12,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742326_1502 (size=21340) 2024-12-16T19:48:12,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742327_1503 (size=460) 2024-12-16T19:48:12,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742327_1503 (size=460) 2024-12-16T19:48:12,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742327_1503 (size=460) 2024-12-16T19:48:12,314 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_3/usercache/jenkins/appcache/application_1734378249643_0008/container_1734378249643_0008_01_000005/launch_container.sh] 2024-12-16T19:48:12,314 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_3/usercache/jenkins/appcache/application_1734378249643_0008/container_1734378249643_0008_01_000005/container_tokens] 2024-12-16T19:48:12,314 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_3/usercache/jenkins/appcache/application_1734378249643_0008/container_1734378249643_0008_01_000005/sysfs] 2024-12-16T19:48:12,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742328_1504 (size=21340) 2024-12-16T19:48:12,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742328_1504 (size=21340) 2024-12-16T19:48:12,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742328_1504 (size=21340) 2024-12-16T19:48:12,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742329_1505 (size=350603) 2024-12-16T19:48:12,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742329_1505 (size=350603) 2024-12-16T19:48:12,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742329_1505 (size=350603) 2024-12-16T19:48:12,445 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0008_000001 (auth:SIMPLE) from 127.0.0.1:55798 2024-12-16T19:48:13,938 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1227): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1734378249643_0008_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:935) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1204) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[hbase-common-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?] 
at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:353) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:237) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-16T19:48:13,939 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378493939 2024-12-16T19:48:13,940 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:38709, tgtDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378493939, rawTgtDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378493939, srcFsUri=hdfs://localhost:38709, srcDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:48:13,978 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:38709, inputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:48:13,978 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378493939, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378493939/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-16T19:48:13,981 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T19:48:13,991 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378493939/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-16T19:48:14,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742330_1506 (size=156) 2024-12-16T19:48:14,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742330_1506 (size=156) 2024-12-16T19:48:14,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742330_1506 (size=156) 2024-12-16T19:48:14,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742331_1507 (size=951) 2024-12-16T19:48:14,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742331_1507 (size=951) 2024-12-16T19:48:14,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742331_1507 (size=951) 2024-12-16T19:48:14,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:14,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:14,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:14,028 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:14,985 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-9361560628071405356.jar 2024-12-16T19:48:14,986 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:14,986 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:15,063 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-7822701461945810534.jar 2024-12-16T19:48:15,063 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:15,063 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:15,064 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:15,064 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:15,064 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:15,064 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 
2024-12-16T19:48:15,064 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T19:48:15,064 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T19:48:15,065 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T19:48:15,065 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T19:48:15,065 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-16T19:48:15,065 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T19:48:15,066 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T19:48:15,066 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T19:48:15,066 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T19:48:15,066 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T19:48:15,066 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T19:48:15,067 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T19:48:15,067 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:48:15,067 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:48:15,067 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:48:15,067 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:48:15,067 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:48:15,068 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:48:15,068 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:48:15,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742332_1508 (size=127628) 2024-12-16T19:48:15,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742332_1508 (size=127628) 2024-12-16T19:48:15,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742332_1508 (size=127628) 2024-12-16T19:48:15,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742333_1509 (size=2172137) 2024-12-16T19:48:15,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742333_1509 (size=2172137) 2024-12-16T19:48:15,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742333_1509 (size=2172137) 2024-12-16T19:48:15,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is 
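The run of "For class X, using jar Y" DEBUG lines above is TableMapReduceUtil resolving, for each class the export job needs, the jar that provides it and shipping that jar with the job. A minimal sketch of that step follows, assuming a placeholder job name; TableMapReduceUtil.addDependencyJars and the tmpjars property are the standard HBase/MapReduce mechanism, everything else is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "dependency-jars-demo");   // hypothetical job name
    // Finds the jar containing each class the job depends on (HBase client classes,
    // shaded protobuf/netty, ZooKeeper, ...) and adds it to the job's distributed cache.
    TableMapReduceUtil.addDependencyJars(job);
    // The resolved jars accumulate in the job's "tmpjars" property, which is what the
    // DEBUG lines above report class by class.
    System.out.println(job.getConfiguration().get("tmpjars"));
  }
}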
added to blk_1073742334_1510 (size=213228) 2024-12-16T19:48:15,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742334_1510 (size=213228) 2024-12-16T19:48:15,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742334_1510 (size=213228) 2024-12-16T19:48:15,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742335_1511 (size=1877034) 2024-12-16T19:48:15,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742335_1511 (size=1877034) 2024-12-16T19:48:15,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742335_1511 (size=1877034) 2024-12-16T19:48:15,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742336_1512 (size=533455) 2024-12-16T19:48:15,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742336_1512 (size=533455) 2024-12-16T19:48:15,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742336_1512 (size=533455) 2024-12-16T19:48:15,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742337_1513 (size=7280644) 2024-12-16T19:48:15,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742337_1513 (size=7280644) 2024-12-16T19:48:15,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742337_1513 (size=7280644) 2024-12-16T19:48:15,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742338_1514 (size=4188619) 2024-12-16T19:48:15,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742338_1514 (size=4188619) 2024-12-16T19:48:15,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742338_1514 (size=4188619) 2024-12-16T19:48:15,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742339_1515 (size=20406) 2024-12-16T19:48:15,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742339_1515 (size=20406) 2024-12-16T19:48:15,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742339_1515 (size=20406) 2024-12-16T19:48:15,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742340_1516 (size=75495) 2024-12-16T19:48:15,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742340_1516 (size=75495) 2024-12-16T19:48:15,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:40033 is added to blk_1073742340_1516 (size=75495) 2024-12-16T19:48:15,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742341_1517 (size=45609) 2024-12-16T19:48:15,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742341_1517 (size=45609) 2024-12-16T19:48:15,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742341_1517 (size=45609) 2024-12-16T19:48:15,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742342_1518 (size=110084) 2024-12-16T19:48:15,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742342_1518 (size=110084) 2024-12-16T19:48:15,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742342_1518 (size=110084) 2024-12-16T19:48:16,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742343_1519 (size=6350922) 2024-12-16T19:48:16,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742343_1519 (size=6350922) 2024-12-16T19:48:16,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742343_1519 (size=6350922) 2024-12-16T19:48:16,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742344_1520 (size=1323991) 2024-12-16T19:48:16,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742344_1520 (size=1323991) 2024-12-16T19:48:16,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742344_1520 (size=1323991) 2024-12-16T19:48:16,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742345_1521 (size=23076) 2024-12-16T19:48:16,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742345_1521 (size=23076) 2024-12-16T19:48:16,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742345_1521 (size=23076) 2024-12-16T19:48:16,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742346_1522 (size=126803) 2024-12-16T19:48:16,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742346_1522 (size=126803) 2024-12-16T19:48:16,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742346_1522 (size=126803) 2024-12-16T19:48:16,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742347_1523 (size=322274) 2024-12-16T19:48:16,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39241 is added to blk_1073742347_1523 (size=322274) 2024-12-16T19:48:16,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742347_1523 (size=322274) 2024-12-16T19:48:16,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742348_1524 (size=1832290) 2024-12-16T19:48:16,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742348_1524 (size=1832290) 2024-12-16T19:48:16,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742348_1524 (size=1832290) 2024-12-16T19:48:16,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742349_1525 (size=451756) 2024-12-16T19:48:16,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742349_1525 (size=451756) 2024-12-16T19:48:16,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742349_1525 (size=451756) 2024-12-16T19:48:16,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742350_1526 (size=30081) 2024-12-16T19:48:16,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742350_1526 (size=30081) 2024-12-16T19:48:16,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742350_1526 (size=30081) 2024-12-16T19:48:17,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742351_1527 (size=53616) 2024-12-16T19:48:17,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742351_1527 (size=53616) 2024-12-16T19:48:17,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742351_1527 (size=53616) 2024-12-16T19:48:17,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742352_1528 (size=29229) 2024-12-16T19:48:17,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742352_1528 (size=29229) 2024-12-16T19:48:17,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742352_1528 (size=29229) 2024-12-16T19:48:17,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742353_1529 (size=169089) 2024-12-16T19:48:17,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742353_1529 (size=169089) 2024-12-16T19:48:17,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742353_1529 (size=169089) 2024-12-16T19:48:17,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40033 is added to blk_1073742354_1530 (size=5175431) 2024-12-16T19:48:17,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742354_1530 (size=5175431) 2024-12-16T19:48:17,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742354_1530 (size=5175431) 2024-12-16T19:48:17,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742355_1531 (size=136454) 2024-12-16T19:48:17,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742355_1531 (size=136454) 2024-12-16T19:48:17,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742355_1531 (size=136454) 2024-12-16T19:48:17,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742356_1532 (size=907467) 2024-12-16T19:48:17,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742356_1532 (size=907467) 2024-12-16T19:48:17,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742356_1532 (size=907467) 2024-12-16T19:48:17,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742357_1533 (size=3317408) 2024-12-16T19:48:17,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742357_1533 (size=3317408) 2024-12-16T19:48:17,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742357_1533 (size=3317408) 2024-12-16T19:48:17,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742358_1534 (size=503880) 2024-12-16T19:48:17,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742358_1534 (size=503880) 2024-12-16T19:48:17,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742358_1534 (size=503880) 2024-12-16T19:48:17,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742359_1535 (size=4695811) 2024-12-16T19:48:17,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742359_1535 (size=4695811) 2024-12-16T19:48:17,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742359_1535 (size=4695811) 2024-12-16T19:48:17,231 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-16T19:48:17,233 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-16T19:48:17,234 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=33.2 K 2024-12-16T19:48:17,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742360_1536 (size=714) 2024-12-16T19:48:17,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742360_1536 (size=714) 2024-12-16T19:48:17,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742360_1536 (size=714) 2024-12-16T19:48:17,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742361_1537 (size=15) 2024-12-16T19:48:17,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742361_1537 (size=15) 2024-12-16T19:48:17,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742361_1537 (size=15) 2024-12-16T19:48:17,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742362_1538 (size=304875) 2024-12-16T19:48:17,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742362_1538 (size=304875) 2024-12-16T19:48:17,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742362_1538 (size=304875) 2024-12-16T19:48:18,522 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T19:48:18,522 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-16T19:48:18,526 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0008_000001 (auth:SIMPLE) from 127.0.0.1:43806 2024-12-16T19:48:18,549 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_1/usercache/jenkins/appcache/application_1734378249643_0008/container_1734378249643_0008_01_000001/launch_container.sh] 2024-12-16T19:48:18,549 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_1/usercache/jenkins/appcache/application_1734378249643_0008/container_1734378249643_0008_01_000001/container_tokens] 2024-12-16T19:48:18,549 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_1/usercache/jenkins/appcache/application_1734378249643_0008/container_1734378249643_0008_01_000001/sysfs] 2024-12-16T19:48:19,228 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0009_000001 (auth:SIMPLE) from 127.0.0.1:48936 2024-12-16T19:48:20,925 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 89cd6ef578a4222ebe98f2a62b571ba1, had cached 0 bytes from a total of 13843 2024-12-16T19:48:20,925 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 999ca5763293d4e8a35303514e7f9365, had cached 0 bytes from a total of 6922 2024-12-16T19:48:24,256 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-16T19:48:26,315 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0009_000001 (auth:SIMPLE) from 127.0.0.1:38056 2024-12-16T19:48:26,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742363_1539 (size=350549) 2024-12-16T19:48:26,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742363_1539 (size=350549) 2024-12-16T19:48:26,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742363_1539 (size=350549) 2024-12-16T19:48:28,588 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0009_000001 (auth:SIMPLE) from 127.0.0.1:48022 2024-12-16T19:48:34,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742364_1540 (size=13843) 2024-12-16T19:48:34,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742364_1540 (size=13843) 2024-12-16T19:48:34,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742364_1540 (size=13843) 2024-12-16T19:48:34,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742365_1541 (size=7821) 2024-12-16T19:48:34,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742365_1541 (size=7821) 2024-12-16T19:48:34,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742365_1541 (size=7821) 2024-12-16T19:48:34,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742366_1542 (size=6922) 2024-12-16T19:48:34,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742366_1542 (size=6922) 2024-12-16T19:48:34,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742366_1542 (size=6922) 2024-12-16T19:48:34,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742367_1543 (size=5451) 2024-12-16T19:48:34,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742367_1543 (size=5451) 2024-12-16T19:48:34,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742367_1543 (size=5451) 2024-12-16T19:48:34,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742368_1544 (size=17459) 2024-12-16T19:48:34,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742368_1544 (size=17459) 2024-12-16T19:48:34,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742368_1544 (size=17459) 
2024-12-16T19:48:34,515 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_2/usercache/jenkins/appcache/application_1734378249643_0009/container_1734378249643_0009_01_000002/launch_container.sh] 2024-12-16T19:48:34,515 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_2/usercache/jenkins/appcache/application_1734378249643_0009/container_1734378249643_0009_01_000002/container_tokens] 2024-12-16T19:48:34,515 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_2/usercache/jenkins/appcache/application_1734378249643_0009/container_1734378249643_0009_01_000002/sysfs] 2024-12-16T19:48:34,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742369_1545 (size=462) 2024-12-16T19:48:34,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742369_1545 (size=462) 2024-12-16T19:48:34,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742369_1545 (size=462) 2024-12-16T19:48:34,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742370_1546 (size=17459) 2024-12-16T19:48:34,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742370_1546 (size=17459) 2024-12-16T19:48:34,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742370_1546 (size=17459) 2024-12-16T19:48:34,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742371_1547 (size=350549) 2024-12-16T19:48:34,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742371_1547 (size=350549) 2024-12-16T19:48:34,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742371_1547 (size=350549) 2024-12-16T19:48:34,743 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0009_000001 (auth:SIMPLE) from 127.0.0.1:48032 2024-12-16T19:48:36,439 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-16T19:48:36,441 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
2024-12-16T19:48:36,459 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportWithChecksum 2024-12-16T19:48:36,459 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-16T19:48:36,466 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-16T19:48:36,467 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-16T19:48:36,471 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-16T19:48:36,471 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-16T19:48:36,471 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378493939/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378493939/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-16T19:48:36,475 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378493939/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-16T19:48:36,475 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378493939/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-16T19:48:36,502 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithChecksum 2024-12-16T19:48:36,504 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-16T19:48:36,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=191, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-16T19:48:36,533 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378516532"}]},"ts":"1734378516532"} 2024-12-16T19:48:36,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-16T19:48:36,548 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-16T19:48:36,622 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set 
testtb-testExportWithChecksum to state=DISABLING 2024-12-16T19:48:36,635 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-16T19:48:36,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-16T19:48:36,647 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=999ca5763293d4e8a35303514e7f9365, UNASSIGN}, {pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=89cd6ef578a4222ebe98f2a62b571ba1, UNASSIGN}] 2024-12-16T19:48:36,655 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=89cd6ef578a4222ebe98f2a62b571ba1, UNASSIGN 2024-12-16T19:48:36,656 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=999ca5763293d4e8a35303514e7f9365, UNASSIGN 2024-12-16T19:48:36,660 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=999ca5763293d4e8a35303514e7f9365, regionState=CLOSING, regionLocation=7a6e4bf70377,45217,1734378236605 2024-12-16T19:48:36,660 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=194 updating hbase:meta row=89cd6ef578a4222ebe98f2a62b571ba1, regionState=CLOSING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:48:36,668 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:48:36,668 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=195, ppid=193, state=RUNNABLE; CloseRegionProcedure 999ca5763293d4e8a35303514e7f9365, server=7a6e4bf70377,45217,1734378236605}] 2024-12-16T19:48:36,674 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:48:36,674 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=196, ppid=194, state=RUNNABLE; CloseRegionProcedure 89cd6ef578a4222ebe98f2a62b571ba1, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:48:36,824 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,45217,1734378236605 2024-12-16T19:48:36,825 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(124): Close 999ca5763293d4e8a35303514e7f9365 2024-12-16T19:48:36,825 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:48:36,825 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1681): Closing 999ca5763293d4e8a35303514e7f9365, disabling compactions & flushes 2024-12-16T19:48:36,825 INFO 
[RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. 2024-12-16T19:48:36,825 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. 2024-12-16T19:48:36,825 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. after waiting 0 ms 2024-12-16T19:48:36,825 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. 2024-12-16T19:48:36,829 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:48:36,829 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:48:36,829 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365. 
2024-12-16T19:48:36,829 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1635): Region close journal for 999ca5763293d4e8a35303514e7f9365: 2024-12-16T19:48:36,830 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(170): Closed 999ca5763293d4e8a35303514e7f9365 2024-12-16T19:48:36,831 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=999ca5763293d4e8a35303514e7f9365, regionState=CLOSED 2024-12-16T19:48:36,836 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=195, resume processing ppid=193 2024-12-16T19:48:36,836 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=195, ppid=193, state=SUCCESS; CloseRegionProcedure 999ca5763293d4e8a35303514e7f9365, server=7a6e4bf70377,45217,1734378236605 in 166 msec 2024-12-16T19:48:36,837 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=193, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=999ca5763293d4e8a35303514e7f9365, UNASSIGN in 190 msec 2024-12-16T19:48:36,840 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:48:36,841 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(124): Close 89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:48:36,841 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:48:36,841 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1681): Closing 89cd6ef578a4222ebe98f2a62b571ba1, disabling compactions & flushes 2024-12-16T19:48:36,841 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. 2024-12-16T19:48:36,841 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. 2024-12-16T19:48:36,841 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. after waiting 0 ms 2024-12-16T19:48:36,841 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. 
2024-12-16T19:48:36,845 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:48:36,845 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:48:36,845 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1. 2024-12-16T19:48:36,845 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1635): Region close journal for 89cd6ef578a4222ebe98f2a62b571ba1: 2024-12-16T19:48:36,846 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(170): Closed 89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:48:36,847 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=194 updating hbase:meta row=89cd6ef578a4222ebe98f2a62b571ba1, regionState=CLOSED 2024-12-16T19:48:36,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-16T19:48:36,850 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=196, resume processing ppid=194 2024-12-16T19:48:36,850 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=196, ppid=194, state=SUCCESS; CloseRegionProcedure 89cd6ef578a4222ebe98f2a62b571ba1, server=7a6e4bf70377,42861,1734378237024 in 174 msec 2024-12-16T19:48:36,851 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=194, resume processing ppid=192 2024-12-16T19:48:36,851 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=194, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=89cd6ef578a4222ebe98f2a62b571ba1, UNASSIGN in 204 msec 2024-12-16T19:48:36,853 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=192, resume processing ppid=191 2024-12-16T19:48:36,853 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=192, ppid=191, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 216 msec 2024-12-16T19:48:36,854 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378516853"}]},"ts":"1734378516853"} 2024-12-16T19:48:36,855 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-16T19:48:36,862 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-16T19:48:36,864 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=191, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithChecksum in 359 msec 2024-12-16T19:48:37,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 
2024-12-16T19:48:37,152 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum, procId: 191 completed 2024-12-16T19:48:37,153 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-16T19:48:37,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-16T19:48:37,156 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-16T19:48:37,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-16T19:48:37,159 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=197, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-16T19:48:37,163 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-16T19:48:37,186 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365 2024-12-16T19:48:37,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-16T19:48:37,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-16T19:48:37,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-16T19:48:37,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-16T19:48:37,187 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-16T19:48:37,187 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-16T19:48:37,188 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-16T19:48:37,188 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 
2024-12-16T19:48:37,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-16T19:48:37,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:48:37,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-16T19:48:37,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-16T19:48:37,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-16T19:48:37,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:48:37,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-16T19:48:37,196 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:48:37,197 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:48:37,197 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:48:37,198 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:48:37,198 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365/recovered.edits] 2024-12-16T19:48:37,198 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:48:37,198 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:48:37,206 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:48:37,214 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/recovered.edits] 2024-12-16T19:48:37,255 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365/cf/6690194a73264b16846442da53e1caaa to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365/cf/6690194a73264b16846442da53e1caaa 2024-12-16T19:48:37,278 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/cf/6056ec3d3fb745a69748dedf1cf7d15d to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/cf/6056ec3d3fb745a69748dedf1cf7d15d 2024-12-16T19:48:37,281 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365/recovered.edits/9.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365/recovered.edits/9.seqid 2024-12-16T19:48:37,282 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/999ca5763293d4e8a35303514e7f9365 2024-12-16T19:48:37,294 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/recovered.edits/9.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1/recovered.edits/9.seqid 2024-12-16T19:48:37,295 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportWithChecksum/89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:48:37,295 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-16T19:48:37,296 DEBUG [PEWorker-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-12-16T19:48:37,297 DEBUG [PEWorker-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf] 2024-12-16T19:48:37,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-16T19:48:37,316 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b2024121630f2a8256d464e488ebd6471f20ca9c7_89cd6ef578a4222ebe98f2a62b571ba1 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b2024121630f2a8256d464e488ebd6471f20ca9c7_89cd6ef578a4222ebe98f2a62b571ba1 2024-12-16T19:48:37,321 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241216c4bfbe99592e498f8623d824768f3a87_999ca5763293d4e8a35303514e7f9365 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e20241216c4bfbe99592e498f8623d824768f3a87_999ca5763293d4e8a35303514e7f9365 2024-12-16T19:48:37,322 DEBUG [PEWorker-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-12-16T19:48:37,324 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=197, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-16T19:48:37,341 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-16T19:48:37,348 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-16T19:48:37,349 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=197, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-16T19:48:37,349 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportWithChecksum' from region states. 
2024-12-16T19:48:37,350 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378517350"}]},"ts":"9223372036854775807"} 2024-12-16T19:48:37,350 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378517350"}]},"ts":"9223372036854775807"} 2024-12-16T19:48:37,359 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-16T19:48:37,359 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 999ca5763293d4e8a35303514e7f9365, NAME => 'testtb-testExportWithChecksum,,1734378455456.999ca5763293d4e8a35303514e7f9365.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 89cd6ef578a4222ebe98f2a62b571ba1, NAME => 'testtb-testExportWithChecksum,1,1734378455456.89cd6ef578a4222ebe98f2a62b571ba1.', STARTKEY => '1', ENDKEY => ''}] 2024-12-16T19:48:37,359 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportWithChecksum' as deleted. 2024-12-16T19:48:37,359 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734378517359"}]},"ts":"9223372036854775807"} 2024-12-16T19:48:37,376 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithChecksum state from META 2024-12-16T19:48:37,387 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=197, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-16T19:48:37,393 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=197, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithChecksum in 234 msec 2024-12-16T19:48:37,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-16T19:48:37,501 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum, procId: 197 completed 2024-12-16T19:48:37,522 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" 2024-12-16T19:48:37,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-16T19:48:37,525 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" 2024-12-16T19:48:37,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-16T19:48:37,555 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 9607b295cff8a6b815204957546cba58, had cached 0 bytes from a total of 6284 2024-12-16T19:48:37,555 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 34c062451cb346b64f29e9d2d97ae5fd, had cached 0 
bytes from a total of 14267 2024-12-16T19:48:37,570 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=805 (was 810), OpenFileDescriptor=813 (was 823), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=978 (was 797) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 17), AvailableMemoryMB=2573 (was 999) - AvailableMemoryMB LEAK? - 2024-12-16T19:48:37,570 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-12-16T19:48:37,607 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=805, OpenFileDescriptor=813, MaxFileDescriptor=1048576, SystemLoadAverage=978, ProcessCount=20, AvailableMemoryMB=2565 2024-12-16T19:48:37,607 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-12-16T19:48:37,609 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-16T19:48:37,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:37,612 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-16T19:48:37,613 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 198 2024-12-16T19:48:37,613 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-16T19:48:37,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-16T19:48:37,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742372_1548 (size=454) 2024-12-16T19:48:37,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742372_1548 (size=454) 2024-12-16T19:48:37,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742372_1548 (size=454) 2024-12-16T19:48:37,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-16T19:48:37,929 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-16T19:48:38,087 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e95716fa0605c3dca0262a68c3774d78, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:48:38,087 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => ee921e8b6ba36fb129dbc310a6f60160, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:48:38,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742373_1549 (size=79) 2024-12-16T19:48:38,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742373_1549 (size=79) 2024-12-16T19:48:38,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742373_1549 (size=79) 2024-12-16T19:48:38,111 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:48:38,111 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1681): Closing ee921e8b6ba36fb129dbc310a6f60160, disabling compactions & flushes 2024-12-16T19:48:38,111 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. 
2024-12-16T19:48:38,111 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. 2024-12-16T19:48:38,111 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. after waiting 0 ms 2024-12-16T19:48:38,111 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. 2024-12-16T19:48:38,111 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. 2024-12-16T19:48:38,111 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1635): Region close journal for ee921e8b6ba36fb129dbc310a6f60160: 2024-12-16T19:48:38,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742374_1550 (size=79) 2024-12-16T19:48:38,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742374_1550 (size=79) 2024-12-16T19:48:38,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742374_1550 (size=79) 2024-12-16T19:48:38,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-16T19:48:38,533 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:48:38,533 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1681): Closing e95716fa0605c3dca0262a68c3774d78, disabling compactions & flushes 2024-12-16T19:48:38,533 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. 2024-12-16T19:48:38,533 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. 2024-12-16T19:48:38,533 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. after waiting 0 ms 2024-12-16T19:48:38,533 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. 
2024-12-16T19:48:38,533 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. 2024-12-16T19:48:38,533 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1635): Region close journal for e95716fa0605c3dca0262a68c3774d78: 2024-12-16T19:48:38,538 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-16T19:48:38,539 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1734378518538"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378518538"}]},"ts":"1734378518538"} 2024-12-16T19:48:38,539 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1734378518538"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734378518538"}]},"ts":"1734378518538"} 2024-12-16T19:48:38,544 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-16T19:48:38,545 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-16T19:48:38,546 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378518545"}]},"ts":"1734378518545"} 2024-12-16T19:48:38,547 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-16T19:48:38,612 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {7a6e4bf70377=0} racks are {/default-rack=0} 2024-12-16T19:48:38,618 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T19:48:38,618 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T19:48:38,618 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T19:48:38,618 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T19:48:38,618 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T19:48:38,618 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T19:48:38,618 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-16T19:48:38,618 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e95716fa0605c3dca0262a68c3774d78, ASSIGN}, {pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; 
TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ee921e8b6ba36fb129dbc310a6f60160, ASSIGN}] 2024-12-16T19:48:38,625 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ee921e8b6ba36fb129dbc310a6f60160, ASSIGN 2024-12-16T19:48:38,625 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e95716fa0605c3dca0262a68c3774d78, ASSIGN 2024-12-16T19:48:38,630 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e95716fa0605c3dca0262a68c3774d78, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42741,1734378236851; forceNewPlan=false, retain=false 2024-12-16T19:48:38,630 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ee921e8b6ba36fb129dbc310a6f60160, ASSIGN; state=OFFLINE, location=7a6e4bf70377,42861,1734378237024; forceNewPlan=false, retain=false 2024-12-16T19:48:38,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-16T19:48:38,782 INFO [7a6e4bf70377:45883 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-16T19:48:38,782 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=200 updating hbase:meta row=ee921e8b6ba36fb129dbc310a6f60160, regionState=OPENING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:48:38,783 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=e95716fa0605c3dca0262a68c3774d78, regionState=OPENING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:48:38,786 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE; OpenRegionProcedure ee921e8b6ba36fb129dbc310a6f60160, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:48:38,796 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=202, ppid=199, state=RUNNABLE; OpenRegionProcedure e95716fa0605c3dca0262a68c3774d78, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:48:38,964 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:48:38,972 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. 
2024-12-16T19:48:38,972 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7285): Opening region: {ENCODED => ee921e8b6ba36fb129dbc310a6f60160, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160.', STARTKEY => '1', ENDKEY => ''} 2024-12-16T19:48:38,973 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. service=AccessControlService 2024-12-16T19:48:38,973 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-16T19:48:38,973 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:38,973 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:48:38,973 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7327): checking encryption for ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:38,974 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7330): checking classloading for ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:38,976 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:48:38,998 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. 2024-12-16T19:48:38,998 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7285): Opening region: {ENCODED => e95716fa0605c3dca0262a68c3774d78, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78.', STARTKEY => '', ENDKEY => '1'} 2024-12-16T19:48:38,998 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. service=AccessControlService 2024-12-16T19:48:38,998 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-16T19:48:38,999 INFO [StoreOpener-ee921e8b6ba36fb129dbc310a6f60160-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:38,999 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:38,999 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-16T19:48:38,999 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7327): checking encryption for e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:38,999 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7330): checking classloading for e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:39,001 INFO [StoreOpener-ee921e8b6ba36fb129dbc310a6f60160-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ee921e8b6ba36fb129dbc310a6f60160 columnFamilyName cf 2024-12-16T19:48:39,003 DEBUG [StoreOpener-ee921e8b6ba36fb129dbc310a6f60160-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:48:39,004 INFO [StoreOpener-ee921e8b6ba36fb129dbc310a6f60160-1 {}] regionserver.HStore(327): Store=ee921e8b6ba36fb129dbc310a6f60160/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:48:39,005 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:39,005 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:39,008 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 
{event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1085): writing seq id for ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:39,010 INFO [StoreOpener-e95716fa0605c3dca0262a68c3774d78-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:39,012 INFO [StoreOpener-e95716fa0605c3dca0262a68c3774d78-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e95716fa0605c3dca0262a68c3774d78 columnFamilyName cf 2024-12-16T19:48:39,014 DEBUG [StoreOpener-e95716fa0605c3dca0262a68c3774d78-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:48:39,014 INFO [StoreOpener-e95716fa0605c3dca0262a68c3774d78-1 {}] regionserver.HStore(327): Store=e95716fa0605c3dca0262a68c3774d78/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-16T19:48:39,015 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:39,016 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:39,017 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1085): writing seq id for e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:39,018 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:48:39,019 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1102): Opened ee921e8b6ba36fb129dbc310a6f60160; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61608158, jitterRate=-0.08196690678596497}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:48:39,019 DEBUG 
[RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-16T19:48:39,020 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1001): Region open journal for ee921e8b6ba36fb129dbc310a6f60160: 2024-12-16T19:48:39,020 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1102): Opened e95716fa0605c3dca0262a68c3774d78; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60288993, jitterRate=-0.10162399709224701}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-16T19:48:39,020 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1001): Region open journal for e95716fa0605c3dca0262a68c3774d78: 2024-12-16T19:48:39,022 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78., pid=202, masterSystemTime=1734378518976 2024-12-16T19:48:39,026 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160., pid=201, masterSystemTime=1734378518964 2024-12-16T19:48:39,036 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. 2024-12-16T19:48:39,036 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. 2024-12-16T19:48:39,036 DEBUG [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. 2024-12-16T19:48:39,036 INFO [RS_OPEN_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. 
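
The records above trace CreateTableProcedure pid=198 end to end: the two regions are created and closed by the RegionOpenAndInit pool, added to hbase:meta, assigned via TransitRegionStateProcedure pids 199-200, and opened on the region servers by OpenRegionProcedure pids 201-202. For reference, a minimal client-side sketch of creating such a pre-split table follows; the table name, column family cf and split key '1' are taken from the log, while the class name and connection handling are illustrative and not part of the test source.

// Illustrative sketch only (not from the test source): create a table with one
// column family "cf" and a single split key "1", which yields the two regions
// seen above: e95716fa... (startkey '') and ee921e8b... (startkey '1').
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateSplitTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"));
      // One split key produces exactly two regions ('' -> '1' and '1' -> '').
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(td.build(), splitKeys);
    }
  }
}
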
2024-12-16T19:48:39,039 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=e95716fa0605c3dca0262a68c3774d78, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:48:39,039 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=200 updating hbase:meta row=ee921e8b6ba36fb129dbc310a6f60160, regionState=OPEN, openSeqNum=2, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:48:39,045 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=202, resume processing ppid=199 2024-12-16T19:48:39,045 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=202, ppid=199, state=SUCCESS; OpenRegionProcedure e95716fa0605c3dca0262a68c3774d78, server=7a6e4bf70377,42741,1734378236851 in 255 msec 2024-12-16T19:48:39,046 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=200, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ee921e8b6ba36fb129dbc310a6f60160, ASSIGN in 426 msec 2024-12-16T19:48:39,048 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=199, resume processing ppid=198 2024-12-16T19:48:39,048 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=199, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e95716fa0605c3dca0262a68c3774d78, ASSIGN in 427 msec 2024-12-16T19:48:39,050 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-16T19:48:39,050 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378519050"}]},"ts":"1734378519050"} 2024-12-16T19:48:39,050 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=201, resume processing ppid=200 2024-12-16T19:48:39,051 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=201, ppid=200, state=SUCCESS; OpenRegionProcedure ee921e8b6ba36fb129dbc310a6f60160, server=7a6e4bf70377,42861,1734378237024 in 255 msec 2024-12-16T19:48:39,070 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-16T19:48:39,113 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-16T19:48:39,114 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-16T19:48:39,119 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-16T19:48:39,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:48:39,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:48:39,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:48:39,151 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:48:39,171 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=198, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 1.5590 sec 2024-12-16T19:48:39,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:48:39,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-16T19:48:39,174 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:48:39,174 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-16T19:48:39,174 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:48:39,174 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-16T19:48:39,175 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:48:39,175 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-16T19:48:39,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-16T19:48:39,754 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, 
Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 198 completed 2024-12-16T19:48:39,770 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:39,770 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. 2024-12-16T19:48:39,770 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:48:39,846 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-16T19:48:39,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378519846 (current time:1734378519846). 2024-12-16T19:48:39,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:48:39,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-16T19:48:39,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:48:39,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1854ecbe to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@45b39d58 2024-12-16T19:48:39,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46b7872a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:48:39,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:48:39,901 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38230, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:48:39,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1854ecbe to 127.0.0.1:62515 2024-12-16T19:48:39,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:48:39,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x314a7ba4 to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@684a6988 2024-12-16T19:48:39,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@123e2d7c, 
compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:48:39,954 DEBUG [hconnection-0x3ee862c2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:48:39,955 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38240, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:48:39,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:48:39,957 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55234, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:48:39,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x314a7ba4 to 127.0.0.1:62515 2024-12-16T19:48:39,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:48:39,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-16T19:48:39,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-16T19:48:39,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-16T19:48:39,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-16T19:48:39,967 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:48:39,967 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:48:39,969 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:48:39,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-16T19:48:39,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742375_1551 (size=203) 2024-12-16T19:48:39,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742375_1551 (size=203) 2024-12-16T19:48:39,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742375_1551 (size=203) 2024-12-16T19:48:40,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-16T19:48:40,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-16T19:48:40,402 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:48:40,402 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure e95716fa0605c3dca0262a68c3774d78}, {pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure ee921e8b6ba36fb129dbc310a6f60160}] 2024-12-16T19:48:40,406 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:40,407 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:40,558 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:48:40,559 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-16T19:48:40,560 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. 2024-12-16T19:48:40,560 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2538): Flush status journal for ee921e8b6ba36fb129dbc310a6f60160: 2024-12-16T19:48:40,560 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-16T19:48:40,560 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:40,560 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:48:40,560 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T19:48:40,562 DEBUG [RSProcedureDispatcher-pool-5 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:48:40,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=42741 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-16T19:48:40,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. 2024-12-16T19:48:40,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2538): Flush status journal for e95716fa0605c3dca0262a68c3774d78: 2024-12-16T19:48:40,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-16T19:48:40,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:40,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:48:40,564 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-16T19:48:40,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-16T19:48:40,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742377_1553 (size=82) 2024-12-16T19:48:40,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742376_1552 (size=82) 2024-12-16T19:48:40,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742377_1553 (size=82) 2024-12-16T19:48:40,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742376_1552 (size=82) 2024-12-16T19:48:40,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742377_1553 (size=82) 2024-12-16T19:48:40,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742376_1552 (size=82) 2024-12-16T19:48:40,744 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. 2024-12-16T19:48:40,744 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-16T19:48:40,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=205 2024-12-16T19:48:40,745 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:40,745 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:40,746 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. 
2024-12-16T19:48:40,746 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-16T19:48:40,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=204 2024-12-16T19:48:40,746 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:40,747 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:40,748 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=205, ppid=203, state=SUCCESS; SnapshotRegionProcedure ee921e8b6ba36fb129dbc310a6f60160 in 345 msec 2024-12-16T19:48:40,750 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=204, resume processing ppid=203 2024-12-16T19:48:40,750 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:48:40,750 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=204, ppid=203, state=SUCCESS; SnapshotRegionProcedure e95716fa0605c3dca0262a68c3774d78 in 345 msec 2024-12-16T19:48:40,754 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:48:40,770 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-12-16T19:48:40,770 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:48:40,770 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:48:40,772 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-12-16T19:48:40,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742378_1554 (size=74) 2024-12-16T19:48:40,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742378_1554 (size=74) 2024-12-16T19:48:40,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742378_1554 (size=74) 2024-12-16T19:48:40,881 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:48:40,881 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:40,882 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:40,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742379_1555 (size=697) 2024-12-16T19:48:40,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742379_1555 (size=697) 2024-12-16T19:48:40,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742379_1555 (size=697) 2024-12-16T19:48:40,962 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0009_000001 (auth:SIMPLE) from 127.0.0.1:53428 2024-12-16T19:48:40,975 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_2/usercache/jenkins/appcache/application_1734378249643_0009/container_1734378249643_0009_01_000001/launch_container.sh] 2024-12-16T19:48:40,975 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_2/usercache/jenkins/appcache/application_1734378249643_0009/container_1734378249643_0009_01_000001/container_tokens] 
2024-12-16T19:48:40,975 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_2/usercache/jenkins/appcache/application_1734378249643_0009/container_1734378249643_0009_01_000001/sysfs] 2024-12-16T19:48:41,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-16T19:48:41,340 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:48:41,351 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:48:41,351 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:41,353 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:48:41,353 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-16T19:48:41,354 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=203, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 1.3910 sec 2024-12-16T19:48:42,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-16T19:48:42,078 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 203 completed 2024-12-16T19:48:42,083 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42861 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. with WAL disabled. Data may be lost in the event of a crash. 
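
At this point the empty snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp has completed (pid=203) and the test begins loading rows, which the region servers record at HRegion(8254) as writes with the WAL disabled. A minimal sketch of the equivalent client calls follows, assuming the stock HBase 2.x client API; the row and value literals are placeholders, and only the snapshot name, table name, family cf and the skip-WAL durability are grounded in the log.

// Illustrative sketch (assumed client-side equivalent, not the test code itself):
// take a FLUSH-type snapshot, then write a row with the WAL skipped, which is what
// triggers the "with WAL disabled. Data may be lost..." warning above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SnapshotAndSkipWalSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin();
         Table t = conn.getTable(table)) {
      // FLUSH snapshot, matching "type=FLUSH ttl=0" in the snapshot request records.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp", table, SnapshotType.FLUSH);
      // Write with the WAL skipped; the region server logs the data-loss warning seen above.
      Put put = new Put(Bytes.toBytes("row-0"))            // placeholder row key
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"))
          .setDurability(Durability.SKIP_WAL);
      t.put(put);
    }
  }
}
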
2024-12-16T19:48:42,089 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42741 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. with WAL disabled. Data may be lost in the event of a crash. 2024-12-16T19:48:42,092 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:42,092 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. 2024-12-16T19:48:42,092 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-16T19:48:42,105 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-16T19:48:42,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1734378522105 (current time:1734378522105). 2024-12-16T19:48:42,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-16T19:48:42,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-16T19:48:42,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-16T19:48:42,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2dea4e3a to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@52d374e5 2024-12-16T19:48:42,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@87adf09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:48:42,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:48:42,124 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38254, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:48:42,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2dea4e3a to 127.0.0.1:62515 2024-12-16T19:48:42,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:48:42,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ab6835c to 127.0.0.1:62515 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client 
config=org.apache.zookeeper.client.ZKClientConfig@338bb4b3 2024-12-16T19:48:42,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6dd7b75c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-16T19:48:42,153 DEBUG [hconnection-0x7ca6cc32-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:48:42,154 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38258, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:48:42,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-16T19:48:42,157 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55246, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-16T19:48:42,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ab6835c to 127.0.0.1:62515 2024-12-16T19:48:42,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:48:42,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-16T19:48:42,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-16T19:48:42,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=206, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-16T19:48:42,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-16T19:48:42,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-16T19:48:42,163 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-16T19:48:42,164 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-16T19:48:42,166 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-16T19:48:42,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742380_1556 (size=198) 2024-12-16T19:48:42,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742380_1556 (size=198) 2024-12-16T19:48:42,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742380_1556 (size=198) 2024-12-16T19:48:42,185 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-16T19:48:42,186 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure e95716fa0605c3dca0262a68c3774d78}, {pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure ee921e8b6ba36fb129dbc310a6f60160}] 2024-12-16T19:48:42,186 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:42,187 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure ee921e8b6ba36fb129dbc310a6f60160 
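
The second snapshot request (snaptb0-testExportFileSystemStateWithSkipTmp, pid=206) mirrors the first, this time with data to flush. The test name suggests this snapshot is subsequently exported with the temporary staging directory skipped; a sketch of that export step follows, assuming the standard ExportSnapshot tool and its snapshot.export.skip.tmp switch. The destination URI is a placeholder, and the export step itself does not appear in the records above.

// Illustrative sketch only: export the snapshot taken above to another file system,
// writing directly to the target snapshot directory instead of staging under
// .hbase-snapshot/.tmp. Destination URI is a placeholder; the tool class and
// configuration key are assumed to be what the "SkipTmp" test variant exercises.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSkipTmpSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Skip the temporary export directory.
    conf.setBoolean("snapshot.export.skip.tmp", true);
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        "-copy-to", "hdfs://backup-cluster:8020/hbase"   // placeholder destination
    });
    System.exit(rc);
  }
}
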
2024-12-16T19:48:42,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-16T19:48:42,338 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:48:42,338 DEBUG [RSProcedureDispatcher-pool-3 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:48:42,338 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42861 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=208 2024-12-16T19:48:42,338 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=42741 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=207 2024-12-16T19:48:42,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. 2024-12-16T19:48:42,338 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. 2024-12-16T19:48:42,339 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2837): Flushing ee921e8b6ba36fb129dbc310a6f60160 1/1 column families, dataSize=2.87 KB heapSize=6.44 KB 2024-12-16T19:48:42,339 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2837): Flushing e95716fa0605c3dca0262a68c3774d78 1/1 column families, dataSize=400 B heapSize=1.09 KB 2024-12-16T19:48:42,358 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412167c7ea99204f84ae28071feafe9adef97_e95716fa0605c3dca0262a68c3774d78 is 71, key is 02f736f72f95643a28d51d2e3083db5a/cf:q/1734378522089/Put/seqid=0 2024-12-16T19:48:42,384 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T19:48:42,399 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241216aeddd65c1fa849c78e09c73703abe6cc_ee921e8b6ba36fb129dbc310a6f60160 is 71, key is 115aa585d06e794e5a99cae6305028fa/cf:q/1734378522083/Put/seqid=0 2024-12-16T19:48:42,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742381_1557 (size=5311) 2024-12-16T19:48:42,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742381_1557 (size=5311) 2024-12-16T19:48:42,402 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742381_1557 (size=5311) 2024-12-16T19:48:42,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742382_1558 (size=7961) 2024-12-16T19:48:42,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742382_1558 (size=7961) 2024-12-16T19:48:42,415 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:48:42,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742382_1558 (size=7961) 2024-12-16T19:48:42,423 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241216aeddd65c1fa849c78e09c73703abe6cc_ee921e8b6ba36fb129dbc310a6f60160 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241216aeddd65c1fa849c78e09c73703abe6cc_ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:42,431 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160/.tmp/cf/9cd338e110684ab99034482371542b9b, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=ee921e8b6ba36fb129dbc310a6f60160] 2024-12-16T19:48:42,432 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160/.tmp/cf/9cd338e110684ab99034482371542b9b is 220, key is 1de2a0ec60b42c0d3833a44a7f7984395/cf:q/1734378522083/Put/seqid=0 2024-12-16T19:48:42,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742383_1559 (size=14877) 2024-12-16T19:48:42,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742383_1559 (size=14877) 2024-12-16T19:48:42,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742383_1559 (size=14877) 2024-12-16T19:48:42,450 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160/.tmp/cf/9cd338e110684ab99034482371542b9b 2024-12-16T19:48:42,460 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160/.tmp/cf/9cd338e110684ab99034482371542b9b as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160/cf/9cd338e110684ab99034482371542b9b 2024-12-16T19:48:42,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-16T19:48:42,473 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160/cf/9cd338e110684ab99034482371542b9b, entries=44, sequenceid=6, filesize=14.5 K 2024-12-16T19:48:42,479 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(3040): Finished flush of dataSize ~2.87 KB/2936, heapSize ~6.42 KB/6576, currentSize=0 B/0 for ee921e8b6ba36fb129dbc310a6f60160 in 141ms, sequenceid=6, compaction requested=false 2024-12-16T19:48:42,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-16T19:48:42,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2538): Flush status journal for ee921e8b6ba36fb129dbc310a6f60160: 2024-12-16T19:48:42,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-16T19:48:42,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:42,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:48:42,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160/cf/9cd338e110684ab99034482371542b9b] hfiles 2024-12-16T19:48:42,480 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160/cf/9cd338e110684ab99034482371542b9b for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:42,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742384_1560 (size=121) 2024-12-16T19:48:42,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742384_1560 (size=121) 2024-12-16T19:48:42,499 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. 2024-12-16T19:48:42,499 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=208 2024-12-16T19:48:42,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=208 2024-12-16T19:48:42,499 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:42,500 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:42,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=208, ppid=206, state=SUCCESS; SnapshotRegionProcedure ee921e8b6ba36fb129dbc310a6f60160 in 315 msec 2024-12-16T19:48:42,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742384_1560 (size=121) 2024-12-16T19:48:42,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-16T19:48:42,806 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:48:42,811 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HMobStore(268): FLUSH Renaming 
flushed file from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412167c7ea99204f84ae28071feafe9adef97_e95716fa0605c3dca0262a68c3774d78 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202412167c7ea99204f84ae28071feafe9adef97_e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:42,812 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78/.tmp/cf/c5260d479454461aabb09b7de92b7664, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=e95716fa0605c3dca0262a68c3774d78] 2024-12-16T19:48:42,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78/.tmp/cf/c5260d479454461aabb09b7de92b7664 is 220, key is 080354d3084d1f66e861722f2a675ae37/cf:q/1734378522089/Put/seqid=0 2024-12-16T19:48:42,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742385_1561 (size=6608) 2024-12-16T19:48:42,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742385_1561 (size=6608) 2024-12-16T19:48:42,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742385_1561 (size=6608) 2024-12-16T19:48:42,824 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=400, hasBloomFilter=true, into tmp file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78/.tmp/cf/c5260d479454461aabb09b7de92b7664 2024-12-16T19:48:42,829 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78/.tmp/cf/c5260d479454461aabb09b7de92b7664 as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78/cf/c5260d479454461aabb09b7de92b7664 2024-12-16T19:48:42,833 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78/cf/c5260d479454461aabb09b7de92b7664, entries=6, sequenceid=6, filesize=6.5 K 2024-12-16T19:48:42,834 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(3040): Finished flush of dataSize ~400 B/400, heapSize ~1.08 KB/1104, currentSize=0 B/0 for e95716fa0605c3dca0262a68c3774d78 in 496ms, sequenceid=6, compaction requested=false 2024-12-16T19:48:42,834 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2538): Flush status journal for e95716fa0605c3dca0262a68c3774d78: 2024-12-16T19:48:42,834 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-16T19:48:42,834 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:42,834 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-16T19:48:42,835 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78/cf/c5260d479454461aabb09b7de92b7664] hfiles 2024-12-16T19:48:42,835 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78/cf/c5260d479454461aabb09b7de92b7664 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:42,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742386_1562 (size=121) 2024-12-16T19:48:42,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742386_1562 (size=121) 2024-12-16T19:48:42,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742386_1562 (size=121) 2024-12-16T19:48:42,843 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. 
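Because this is a FLUSH-type snapshot, each SnapshotRegionCallable first flushes the region's memstore and only then adds the resulting hfile to the snapshot manifest, which is why both regions report "Finished flush of dataSize ..." before their references are written. The equivalent standalone operation, outside a snapshot, is a plain admin flush; a small sketch under the same placeholder-connection assumption as the earlier snippets:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ManualFlushSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Flush every region of the table; a FLUSH-type snapshot does this implicitly per region.
          admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
        }
      }
    }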
2024-12-16T19:48:42,843 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/7a6e4bf70377:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=207 2024-12-16T19:48:42,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster(4106): Remote procedure done, pid=207 2024-12-16T19:48:42,843 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:42,843 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:42,845 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=207, resume processing ppid=206 2024-12-16T19:48:42,845 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=207, ppid=206, state=SUCCESS; SnapshotRegionProcedure e95716fa0605c3dca0262a68c3774d78 in 659 msec 2024-12-16T19:48:42,845 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-16T19:48:42,845 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-16T19:48:42,846 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
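The separate SNAPSHOT_SNAPSHOT_MOB_REGION pass that starts here exists because the table's cf family is MOB-enabled: each flush above wrote both a regular store file and a mob file under mobdir/, and the snapshot manifest has to reference both sets. A hedged sketch of how such a family is declared (table and family names are taken from the log; the 100 KB threshold and class name are placeholders, not the test's actual settings):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMobEnabled(true)          // values over the threshold are stored as mob files
                .setMobThreshold(100 * 1024L) // placeholder threshold, in bytes
                .build())
            .build();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          admin.createTable(desc);
        }
      }
    }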
2024-12-16T19:48:42,846 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-16T19:48:42,846 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-16T19:48:42,847 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241216aeddd65c1fa849c78e09c73703abe6cc_ee921e8b6ba36fb129dbc310a6f60160, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202412167c7ea99204f84ae28071feafe9adef97_e95716fa0605c3dca0262a68c3774d78] hfiles 2024-12-16T19:48:42,847 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241216aeddd65c1fa849c78e09c73703abe6cc_ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:42,847 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202412167c7ea99204f84ae28071feafe9adef97_e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:42,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742387_1563 (size=305) 2024-12-16T19:48:42,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742387_1563 (size=305) 2024-12-16T19:48:42,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742387_1563 (size=305) 2024-12-16T19:48:42,853 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-16T19:48:42,853 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:42,854 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:42,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742388_1564 (size=1007) 2024-12-16T19:48:42,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added 
to blk_1073742388_1564 (size=1007) 2024-12-16T19:48:42,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742388_1564 (size=1007) 2024-12-16T19:48:42,863 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-16T19:48:42,867 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-16T19:48:42,867 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:42,868 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-16T19:48:42,868 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-16T19:48:42,869 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=206, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 709 msec 2024-12-16T19:48:43,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-16T19:48:43,267 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 206 completed 2024-12-16T19:48:43,267 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378523267 2024-12-16T19:48:43,267 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:38709, tgtDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378523267, rawTgtDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378523267, srcFsUri=hdfs://localhost:38709, srcDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:48:43,296 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:38709, 
inputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c 2024-12-16T19:48:43,297 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378523267, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378523267/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:43,298 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-16T19:48:43,304 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378523267/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:43,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742390_1566 (size=198) 2024-12-16T19:48:43,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742390_1566 (size=198) 2024-12-16T19:48:43,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742390_1566 (size=198) 2024-12-16T19:48:43,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742389_1565 (size=1007) 2024-12-16T19:48:43,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742389_1565 (size=1007) 2024-12-16T19:48:43,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742389_1565 (size=1007) 2024-12-16T19:48:43,322 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:43,322 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:43,322 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:43,323 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:44,320 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-18144521634290682861.jar 2024-12-16T19:48:44,320 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:44,320 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:44,390 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop-158980221747111759.jar 2024-12-16T19:48:44,391 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:44,391 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:44,391 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:44,392 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:44,392 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:44,392 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-16T19:48:44,392 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-16T19:48:44,392 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-16T19:48:44,393 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, 
using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-16T19:48:44,393 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-16T19:48:44,393 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-16T19:48:44,393 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-16T19:48:44,394 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-16T19:48:44,394 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-16T19:48:44,394 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-16T19:48:44,394 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-16T19:48:44,395 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-16T19:48:44,395 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-16T19:48:44,395 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:48:44,395 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:48:44,395 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:48:44,396 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:48:44,396 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-16T19:48:44,396 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:48:44,396 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-16T19:48:44,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742391_1567 (size=127628) 2024-12-16T19:48:44,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742391_1567 (size=127628) 2024-12-16T19:48:44,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742391_1567 (size=127628) 2024-12-16T19:48:44,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742392_1568 (size=2172137) 2024-12-16T19:48:44,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742392_1568 (size=2172137) 2024-12-16T19:48:44,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742392_1568 (size=2172137) 2024-12-16T19:48:44,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742393_1569 (size=213228) 2024-12-16T19:48:44,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742393_1569 (size=213228) 2024-12-16T19:48:44,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742393_1569 (size=213228) 2024-12-16T19:48:44,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742394_1570 (size=1877034) 2024-12-16T19:48:44,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742394_1570 (size=1877034) 2024-12-16T19:48:44,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40033 is added to blk_1073742394_1570 (size=1877034) 2024-12-16T19:48:44,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742395_1571 (size=533455) 2024-12-16T19:48:44,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742395_1571 (size=533455) 2024-12-16T19:48:44,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742395_1571 (size=533455) 2024-12-16T19:48:44,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742396_1572 (size=7280644) 2024-12-16T19:48:44,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742396_1572 (size=7280644) 2024-12-16T19:48:44,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742396_1572 (size=7280644) 2024-12-16T19:48:45,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742397_1573 (size=4188619) 2024-12-16T19:48:45,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742397_1573 (size=4188619) 2024-12-16T19:48:45,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742397_1573 (size=4188619) 2024-12-16T19:48:45,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742398_1574 (size=20406) 2024-12-16T19:48:45,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742398_1574 (size=20406) 2024-12-16T19:48:45,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742398_1574 (size=20406) 2024-12-16T19:48:45,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742399_1575 (size=75495) 2024-12-16T19:48:45,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742399_1575 (size=75495) 2024-12-16T19:48:45,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742399_1575 (size=75495) 2024-12-16T19:48:45,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742400_1576 (size=45609) 2024-12-16T19:48:45,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742400_1576 (size=45609) 2024-12-16T19:48:45,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742400_1576 (size=45609) 2024-12-16T19:48:45,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742401_1577 (size=110084) 2024-12-16T19:48:45,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742401_1577 (size=110084) 2024-12-16T19:48:45,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742401_1577 (size=110084) 2024-12-16T19:48:45,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742402_1578 (size=1323991) 2024-12-16T19:48:45,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742402_1578 (size=1323991) 2024-12-16T19:48:45,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742402_1578 (size=1323991) 2024-12-16T19:48:45,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742403_1579 (size=23076) 2024-12-16T19:48:45,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742403_1579 (size=23076) 2024-12-16T19:48:45,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742403_1579 (size=23076) 2024-12-16T19:48:45,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742404_1580 (size=126803) 2024-12-16T19:48:45,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742404_1580 (size=126803) 2024-12-16T19:48:45,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742404_1580 (size=126803) 2024-12-16T19:48:45,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742405_1581 (size=322274) 2024-12-16T19:48:45,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742405_1581 (size=322274) 2024-12-16T19:48:45,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742405_1581 (size=322274) 2024-12-16T19:48:45,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742406_1582 (size=1832290) 2024-12-16T19:48:45,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742406_1582 (size=1832290) 2024-12-16T19:48:45,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742406_1582 (size=1832290) 2024-12-16T19:48:45,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742407_1583 (size=30081) 2024-12-16T19:48:45,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742407_1583 (size=30081) 2024-12-16T19:48:45,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742407_1583 (size=30081) 2024-12-16T19:48:45,099 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742408_1584 (size=53616) 2024-12-16T19:48:45,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742408_1584 (size=53616) 2024-12-16T19:48:45,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742408_1584 (size=53616) 2024-12-16T19:48:45,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742409_1585 (size=29229) 2024-12-16T19:48:45,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742409_1585 (size=29229) 2024-12-16T19:48:45,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742409_1585 (size=29229) 2024-12-16T19:48:45,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742410_1586 (size=169089) 2024-12-16T19:48:45,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742410_1586 (size=169089) 2024-12-16T19:48:45,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742410_1586 (size=169089) 2024-12-16T19:48:45,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742411_1587 (size=451756) 2024-12-16T19:48:45,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742411_1587 (size=451756) 2024-12-16T19:48:45,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742411_1587 (size=451756) 2024-12-16T19:48:45,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742412_1588 (size=5175431) 2024-12-16T19:48:45,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742412_1588 (size=5175431) 2024-12-16T19:48:45,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742412_1588 (size=5175431) 2024-12-16T19:48:45,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742413_1589 (size=136454) 2024-12-16T19:48:45,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742413_1589 (size=136454) 2024-12-16T19:48:45,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742413_1589 (size=136454) 2024-12-16T19:48:45,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742414_1590 (size=907467) 2024-12-16T19:48:45,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742414_1590 (size=907467) 2024-12-16T19:48:45,154 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742414_1590 (size=907467) 2024-12-16T19:48:45,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742415_1591 (size=3317408) 2024-12-16T19:48:45,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742415_1591 (size=3317408) 2024-12-16T19:48:45,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742415_1591 (size=3317408) 2024-12-16T19:48:45,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742416_1592 (size=6350922) 2024-12-16T19:48:45,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742416_1592 (size=6350922) 2024-12-16T19:48:45,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742416_1592 (size=6350922) 2024-12-16T19:48:45,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742417_1593 (size=503880) 2024-12-16T19:48:45,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742417_1593 (size=503880) 2024-12-16T19:48:45,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742417_1593 (size=503880) 2024-12-16T19:48:45,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742418_1594 (size=4695811) 2024-12-16T19:48:45,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742418_1594 (size=4695811) 2024-12-16T19:48:45,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742418_1594 (size=4695811) 2024-12-16T19:48:45,212 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
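The long run of TableMapReduceUtil "For class X, using jar Y" entries above is the export tool shipping its dependency jars with the MapReduce job, and the "No job jar file set" warning is expected here because the classes travel in those dependency jars rather than in a job jar. In this test the export runs in-process, but standalone it is an ordinary Hadoop Tool; roughly, under my assumptions (paths and snapshot name copied from the log, the skip-tmp property name inferred from the skipTmp=true flag printed at 19:48:43,297, mapper count arbitrary):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed to be the property behind "skipTmp=true": copy straight into the final
        // .hbase-snapshot directory instead of a .tmp staging directory.
        conf.setBoolean("snapshot.export.skip.tmp", true);
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
            "-copy-to", "hdfs://localhost:38709/user/jenkins/test-data/"
                + "2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378523267",
            "-mappers", "1"
        });
        System.exit(rc);
      }
    }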
2024-12-16T19:48:45,214 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-16T19:48:45,216 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=33.9 K 2024-12-16T19:48:45,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742419_1595 (size=770) 2024-12-16T19:48:45,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742419_1595 (size=770) 2024-12-16T19:48:45,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742419_1595 (size=770) 2024-12-16T19:48:45,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742420_1596 (size=15) 2024-12-16T19:48:45,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742420_1596 (size=15) 2024-12-16T19:48:45,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742420_1596 (size=15) 2024-12-16T19:48:45,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742421_1597 (size=305049) 2024-12-16T19:48:45,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742421_1597 (size=305049) 2024-12-16T19:48:45,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742421_1597 (size=305049) 2024-12-16T19:48:45,256 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-16T19:48:45,256 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-16T19:48:45,494 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0010_000001 (auth:SIMPLE) from 127.0.0.1:44568 2024-12-16T19:48:46,039 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:46,039 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-16T19:48:46,040 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-16T19:48:50,788 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0010_000001 (auth:SIMPLE) from 127.0.0.1:39868 2024-12-16T19:48:51,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742422_1598 (size=350747) 2024-12-16T19:48:51,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742422_1598 (size=350747) 2024-12-16T19:48:51,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742422_1598 (size=350747) 2024-12-16T19:48:51,542 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T19:48:53,011 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1734378249643_0010_000001 (auth:SIMPLE) from 127.0.0.1:57824 2024-12-16T19:48:54,257 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
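The ExportSnapshot entries around this point show the test loading the 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list, splitting it, and running the copy as a MapReduce job. As a minimal sketch of how the same export is usually launched outside the test harness: the -snapshot/-copy-to/-mappers flags follow the HBase reference guide, while the snapshot.export.skip.tmp property key and the destination URI below are assumptions for illustration, not values taken from this log.

```java
// Sketch only, not part of the captured log. Assumes hbase-client and hbase-mapreduce
// (which provides ExportSnapshot) on the classpath.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed property key for the "skip tmp directory" behaviour this test exercises.
    conf.setBoolean("snapshot.export.skip.tmp", true);
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        "-copy-to", "hdfs://namenode:8020/hbase-export",  // placeholder destination
        "-mappers", "2"
    });
    System.exit(rc);
  }
}
```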
2024-12-16T19:48:56,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742423_1599 (size=14877) 2024-12-16T19:48:56,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742423_1599 (size=14877) 2024-12-16T19:48:56,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742423_1599 (size=14877) 2024-12-16T19:48:56,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742424_1600 (size=7961) 2024-12-16T19:48:56,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742424_1600 (size=7961) 2024-12-16T19:48:56,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742424_1600 (size=7961) 2024-12-16T19:48:56,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742425_1601 (size=6608) 2024-12-16T19:48:56,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742425_1601 (size=6608) 2024-12-16T19:48:56,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742425_1601 (size=6608) 2024-12-16T19:48:56,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742426_1602 (size=5311) 2024-12-16T19:48:56,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742426_1602 (size=5311) 2024-12-16T19:48:56,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742426_1602 (size=5311) 2024-12-16T19:48:56,640 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-16T19:48:56,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742427_1603 (size=17473) 2024-12-16T19:48:56,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742427_1603 (size=17473) 2024-12-16T19:48:56,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742427_1603 (size=17473) 2024-12-16T19:48:56,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742428_1604 (size=476) 2024-12-16T19:48:56,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742428_1604 (size=476) 2024-12-16T19:48:56,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742428_1604 (size=476) 2024-12-16T19:48:56,858 INFO [LruBlockCacheStatsExecutor {}] 
hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=662.40 KB, freeSize=879.35 MB, max=880 MB, blockCount=2, accesses=2, hits=0, hitRatio=0, cachingAccesses=2, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-16T19:48:57,042 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-16T19:48:57,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742429_1605 (size=17473) 2024-12-16T19:48:57,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742429_1605 (size=17473) 2024-12-16T19:48:57,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742429_1605 (size=17473) 2024-12-16T19:48:57,115 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_0/usercache/jenkins/appcache/application_1734378249643_0010/container_1734378249643_0010_01_000002/launch_container.sh] 2024-12-16T19:48:57,115 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_0/usercache/jenkins/appcache/application_1734378249643_0010/container_1734378249643_0010_01_000002/container_tokens] 2024-12-16T19:48:57,115 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-0_0/usercache/jenkins/appcache/application_1734378249643_0010/container_1734378249643_0010_01_000002/sysfs] 2024-12-16T19:48:57,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742430_1606 (size=350747) 2024-12-16T19:48:57,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742430_1606 (size=350747) 2024-12-16T19:48:57,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742430_1606 (size=350747) 2024-12-16T19:48:57,330 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-12-16T19:48:57,334 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] 
regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-16T19:48:58,394 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-16T19:48:58,394 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-16T19:48:58,400 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,400 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-16T19:48:58,400 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-16T19:48:58,400 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,401 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-16T19:48:58,401 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-16T19:48:58,401 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_1802764255_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378523267/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378523267/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,401 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378523267/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-16T19:48:58,401 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/export-test/export-1734378523267/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-16T19:48:58,407 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,407 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=209, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,409 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-16T19:48:58,409 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378538409"}]},"ts":"1734378538409"} 2024-12-16T19:48:58,410 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-16T19:48:58,461 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-16T19:48:58,462 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-16T19:48:58,463 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e95716fa0605c3dca0262a68c3774d78, UNASSIGN}, {pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ee921e8b6ba36fb129dbc310a6f60160, UNASSIGN}] 2024-12-16T19:48:58,464 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ee921e8b6ba36fb129dbc310a6f60160, UNASSIGN 2024-12-16T19:48:58,464 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e95716fa0605c3dca0262a68c3774d78, UNASSIGN 2024-12-16T19:48:58,465 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=212 updating hbase:meta row=ee921e8b6ba36fb129dbc310a6f60160, regionState=CLOSING, regionLocation=7a6e4bf70377,42861,1734378237024 2024-12-16T19:48:58,465 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=211 updating hbase:meta row=e95716fa0605c3dca0262a68c3774d78, regionState=CLOSING, regionLocation=7a6e4bf70377,42741,1734378236851 2024-12-16T19:48:58,467 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:48:58,467 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=213, ppid=212, state=RUNNABLE; CloseRegionProcedure ee921e8b6ba36fb129dbc310a6f60160, server=7a6e4bf70377,42861,1734378237024}] 2024-12-16T19:48:58,468 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-16T19:48:58,468 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=214, ppid=211, state=RUNNABLE; CloseRegionProcedure e95716fa0605c3dca0262a68c3774d78, server=7a6e4bf70377,42741,1734378236851}] 2024-12-16T19:48:58,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-16T19:48:58,619 DEBUG [RSProcedureDispatcher-pool-5 {}] 
master.ServerManager(801): New admin connection to 7a6e4bf70377,42861,1734378237024 2024-12-16T19:48:58,620 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(124): Close ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:58,620 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:48:58,620 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1681): Closing ee921e8b6ba36fb129dbc310a6f60160, disabling compactions & flushes 2024-12-16T19:48:58,620 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. 2024-12-16T19:48:58,620 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. 2024-12-16T19:48:58,620 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. after waiting 0 ms 2024-12-16T19:48:58,620 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. 2024-12-16T19:48:58,620 DEBUG [RSProcedureDispatcher-pool-4 {}] master.ServerManager(801): New admin connection to 7a6e4bf70377,42741,1734378236851 2024-12-16T19:48:58,621 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(124): Close e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:58,621 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-16T19:48:58,621 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1681): Closing e95716fa0605c3dca0262a68c3774d78, disabling compactions & flushes 2024-12-16T19:48:58,621 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. 2024-12-16T19:48:58,621 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. 2024-12-16T19:48:58,621 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. 
after waiting 0 ms 2024-12-16T19:48:58,621 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. 2024-12-16T19:48:58,624 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:48:58,624 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:48:58,624 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:48:58,624 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:48:58,624 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78. 2024-12-16T19:48:58,624 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1635): Region close journal for e95716fa0605c3dca0262a68c3774d78: 2024-12-16T19:48:58,624 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160. 
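The unassign/close entries above are the server side of the DisableTableProcedure stored as pid=209; the client side is a plain Admin call, and the DeleteTableProcedure that appears further down in the log is triggered the same way. A minimal sketch using the standard HBase 2.x client API (the try-with-resources wiring is illustrative, not taken from the test):

```java
// Sketch only, not part of the captured log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);  // DisableTableProcedure, as in the entries above
        }
        admin.deleteTable(table);     // DeleteTableProcedure, seen later in this log
      }
    }
  }
}
```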
2024-12-16T19:48:58,624 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1635): Region close journal for ee921e8b6ba36fb129dbc310a6f60160: 2024-12-16T19:48:58,626 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(170): Closed e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:58,626 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=211 updating hbase:meta row=e95716fa0605c3dca0262a68c3774d78, regionState=CLOSED 2024-12-16T19:48:58,627 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(170): Closed ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:58,627 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=212 updating hbase:meta row=ee921e8b6ba36fb129dbc310a6f60160, regionState=CLOSED 2024-12-16T19:48:58,629 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=214, resume processing ppid=211 2024-12-16T19:48:58,629 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=214, ppid=211, state=SUCCESS; CloseRegionProcedure e95716fa0605c3dca0262a68c3774d78, server=7a6e4bf70377,42741,1734378236851 in 159 msec 2024-12-16T19:48:58,629 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=213, resume processing ppid=212 2024-12-16T19:48:58,629 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=213, ppid=212, state=SUCCESS; CloseRegionProcedure ee921e8b6ba36fb129dbc310a6f60160, server=7a6e4bf70377,42861,1734378237024 in 161 msec 2024-12-16T19:48:58,629 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=211, ppid=210, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=e95716fa0605c3dca0262a68c3774d78, UNASSIGN in 166 msec 2024-12-16T19:48:58,630 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=212, resume processing ppid=210 2024-12-16T19:48:58,630 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=212, ppid=210, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ee921e8b6ba36fb129dbc310a6f60160, UNASSIGN in 166 msec 2024-12-16T19:48:58,631 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=210, resume processing ppid=209 2024-12-16T19:48:58,632 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=210, ppid=209, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 169 msec 2024-12-16T19:48:58,632 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734378538632"}]},"ts":"1734378538632"} 2024-12-16T19:48:58,633 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-16T19:48:58,644 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-16T19:48:58,646 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=209, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 238 msec 2024-12-16T19:48:58,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-16T19:48:58,711 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 209 completed 2024-12-16T19:48:58,712 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] procedure2.ProcedureExecutor(1098): Stored pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,713 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,714 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=215, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,715 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45217 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,717 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:58,717 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:58,718 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78/recovered.edits] 2024-12-16T19:48:58,718 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160/cf, FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160/recovered.edits] 2024-12-16T19:48:58,721 DEBUG [HFileArchiver-29 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160/cf/9cd338e110684ab99034482371542b9b to 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160/cf/9cd338e110684ab99034482371542b9b 2024-12-16T19:48:58,722 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78/cf/c5260d479454461aabb09b7de92b7664 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78/cf/c5260d479454461aabb09b7de92b7664 2024-12-16T19:48:58,724 DEBUG [HFileArchiver-30 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78/recovered.edits/9.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78/recovered.edits/9.seqid 2024-12-16T19:48:58,724 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160/recovered.edits/9.seqid to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160/recovered.edits/9.seqid 2024-12-16T19:48:58,725 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:58,725 DEBUG [HFileArchiver-28 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testtb-testExportFileSystemStateWithSkipTmp/ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:58,725 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-16T19:48:58,726 DEBUG [PEWorker-3 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-12-16T19:48:58,726 DEBUG [PEWorker-3 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf] 2024-12-16T19:48:58,730 DEBUG [HFileArchiver-31 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241216aeddd65c1fa849c78e09c73703abe6cc_ee921e8b6ba36fb129dbc310a6f60160 to 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b20241216aeddd65c1fa849c78e09c73703abe6cc_ee921e8b6ba36fb129dbc310a6f60160 2024-12-16T19:48:58,730 DEBUG [HFileArchiver-32 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202412167c7ea99204f84ae28071feafe9adef97_e95716fa0605c3dca0262a68c3774d78 to hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e202412167c7ea99204f84ae28071feafe9adef97_e95716fa0605c3dca0262a68c3774d78 2024-12-16T19:48:58,731 DEBUG [PEWorker-3 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-12-16T19:48:58,733 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=215, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,735 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-16T19:48:58,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,736 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-16T19:48:58,736 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-16T19:48:58,736 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-16T19:48:58,736 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-16T19:48:58,741 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(408): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-16T19:48:58,742 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=215, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,742 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(398): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-12-16T19:48:58,743 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378538742"}]},"ts":"9223372036854775807"} 2024-12-16T19:48:58,743 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734378538742"}]},"ts":"9223372036854775807"} 2024-12-16T19:48:58,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:48:58,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:48:58,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:48:58,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-16T19:48:58,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=215 2024-12-16T19:48:58,745 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:48:58,745 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:48:58,745 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:48:58,745 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-16T19:48:58,746 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-16T19:48:58,746 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => e95716fa0605c3dca0262a68c3774d78, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1734378517608.e95716fa0605c3dca0262a68c3774d78.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ee921e8b6ba36fb129dbc310a6f60160, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1734378517608.ee921e8b6ba36fb129dbc310a6f60160.', STARTKEY => '1', ENDKEY => ''}] 2024-12-16T19:48:58,746 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(402): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 
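Shortly after the table is deleted, the log shows the master handling two "delete name: ..." snapshot requests for the empty and populated snapshots; those are the RPCs that the client-side Admin.deleteSnapshot API issues. A minimal sketch, reusing the same connection pattern as the previous example:

```java
// Sketch only, not part of the captured log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp");
      admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSkipTmp");
    }
  }
}
```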
2024-12-16T19:48:58,746 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734378538746"}]},"ts":"9223372036854775807"} 2024-12-16T19:48:58,748 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-16T19:48:58,759 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=215, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,760 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=215, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 47 msec 2024-12-16T19:48:58,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=215 2024-12-16T19:48:58,846 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 215 completed 2024-12-16T19:48:58,852 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" 2024-12-16T19:48:58,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,856 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" 2024-12-16T19:48:58,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-16T19:48:58,881 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=808 (was 805) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-51 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 82381) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1422999152) connection to localhost/127.0.0.1:44709 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Thread-8271 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:38404 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-53 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-50 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44709 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:45642 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x736823cc-shared-pool-52 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1802764255_22 at /127.0.0.1:54722 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1959844508_1 at /127.0.0.1:38372 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=814 (was 813) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=874 (was 978), ProcessCount=14 (was 20), AvailableMemoryMB=2054 (was 2565) 2024-12-16T19:48:58,881 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=808 is superior to 500 2024-12-16T19:48:58,881 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2861): Stopping mini mapreduce cluster... 
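The "Stopping mini mapreduce cluster..." entry just above, and the Jetty WebAppContext shutdowns that follow, come from the test utility tearing down its embedded YARN/MR and HBase clusters. A hedged sketch of that lifecycle; the method names are my reading of the branch-2 HBaseTestingUtility API and should be treated as assumptions rather than quotes from this test:

```java
// Sketch only, not part of the captured log. Method names assumed from the
// branch-2 HBaseTestingUtility API.
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(3);             // HDFS + ZooKeeper + 3 region servers
    util.startMiniMapReduceCluster();     // YARN/MR cluster used by the export jobs
    try {
      // ... create table, snapshot it, export and verify ...
    } finally {
      util.shutdownMiniMapReduceCluster(); // "Stopping mini mapreduce cluster..."
      util.shutdownMiniCluster();
    }
  }
}
```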
2024-12-16T19:48:58,889 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2d9dead1{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-16T19:48:58,893 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@68244ab2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T19:48:58,893 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T19:48:58,893 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5b8566ee{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-16T19:48:58,893 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44abea2f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.log.dir/,STOPPED} 2024-12-16T19:48:58,918 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1734378249643_0010_01_000001 is : 143 2024-12-16T19:48:58,920 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/MiniMRCluster_1353879548/yarn-3833710179/MiniMRCluster_1353879548-localDir-nm-1_0/usercache/jenkins/appcache/application_1734378249643_0010/container_1734378249643_0010_01_000001/sysfs] 2024-12-16T19:49:00,222 INFO [7a6e4bf70377:42861Replication Statistics #0 {}] regionserver.Replication$ReplicationStatisticsTask(247): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-16T19:49:00,222 INFO [7a6e4bf70377:45217Replication Statistics #0 {}] regionserver.Replication$ReplicationStatisticsTask(247): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-16T19:49:00,232 INFO [7a6e4bf70377:42741Replication Statistics #0 {}] regionserver.Replication$ReplicationStatisticsTask(247): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-16T19:49:01,868 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-16T19:49:01,871 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.BaseLoadBalancer(778): Start Generate Balance plan for cluster. 
2024-12-16T19:49:01,872 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.BalancerClusterState(202): Hosts are {7a6e4bf70377=0} racks are {/default-rack=0} 2024-12-16T19:49:01,872 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-16T19:49:01,872 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-16T19:49:01,872 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-16T19:49:01,872 INFO [master/7a6e4bf70377:0.Chore.1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-16T19:49:01,872 INFO [master/7a6e4bf70377:0.Chore.1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-16T19:49:01,872 INFO [master/7a6e4bf70377:0.Chore.1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-16T19:49:01,872 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] balancer.BalancerClusterState(319): Number of tables=4, number of hosts=1, number of racks=1 2024-12-16T19:49:01,875 INFO [master/7a6e4bf70377:0.Chore.1 {}] balancer.StochasticLoadBalancer(421): Cluster wide - skipping load balancing because weighted average imbalance=0.014082485040843942 <= threshold(0.025). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 0.025 or increase the relative multiplier(s) of the specific cost function(s). functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8313599307046383, need balance); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8641712682129723, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-12-16T19:49:01,875 DEBUG [master/7a6e4bf70377:0.Chore.1 {}] master.HMaster(2144): Balancer is going into sleep until next period in 300000ms 2024-12-16T19:49:01,880 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testExportExpiredSnapshot because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-16T19:49:03,163 INFO [regionserver/7a6e4bf70377:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(2070): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/info has an old edit so flush to free WALs after random delay 148766 ms 2024-12-16T19:49:04,153 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T19:49:04,159 INFO [regionserver/7a6e4bf70377:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(2070): MemstoreFlusherChore requesting flush of hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141. 
because 3893a032022fb38e1a88fc4547193141/info has an old edit so flush to free WALs after random delay 138339 ms 2024-12-16T19:49:06,039 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-16T19:49:11,541 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T19:49:13,143 INFO [regionserver/7a6e4bf70377:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(2070): MemstoreFlusherChore requesting flush of hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb. because 3e93dad405937162271b1d330fba4cbb/l has an old edit so flush to free WALs after random delay 222093 ms 2024-12-16T19:49:15,911 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@775e4a7b{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-16T19:49:15,911 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@95a6b81{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T19:49:15,912 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T19:49:15,912 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77ddd41a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-16T19:49:15,912 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b0f1d9a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.log.dir/,STOPPED} 2024-12-16T19:49:22,555 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 9607b295cff8a6b815204957546cba58, had cached 0 bytes from a total of 6284 2024-12-16T19:49:22,556 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 34c062451cb346b64f29e9d2d97ae5fd, had cached 0 bytes from a total of 14267 2024-12-16T19:49:24,257 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
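The StochasticLoadBalancer entry above spells out the knob it checks before doing any work: balancing is skipped because the weighted average imbalance (about 0.014) is below `hbase.master.balancer.stochastic.minCostNeedBalance` (0.025). As a hedged sketch of how that threshold could be lowered programmatically (the property name comes from the log itself; the value 0.01 and the class name `BalancerTuning` are only examples, not recommendations):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: lower the stochastic balancer threshold named in the log entry
// above so the balancer acts on smaller imbalances.
public class BalancerTuning {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Below this weighted-imbalance value the balancer logs
        // "skipping load balancing" and goes back to sleep.
        conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.01f);
        System.out.println("minCostNeedBalance = "
                + conf.getFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.025f));
    }
}
```

In a real deployment this would typically be set in `hbase-site.xml` on the master rather than in code; the log's alternative suggestion is to raise the multiplier of the specific cost function that is out of balance (here the read/write request cost functions).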
2024-12-16T19:49:32,921 ERROR [Thread[Thread-420,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-16T19:49:32,922 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@95ddd44{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-16T19:49:32,922 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@18f96a02{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T19:49:32,923 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T19:49:32,923 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ff0711a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-16T19:49:32,923 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@216a9072{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.log.dir/,STOPPED} 2024-12-16T19:49:32,930 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 2024-12-16T19:49:32,941 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-16T19:49:32,941 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-16T19:49:32,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741830_1006 (size=953604) 2024-12-16T19:49:32,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741830_1006 (size=953604) 2024-12-16T19:49:32,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741830_1006 (size=953604) 2024-12-16T19:49:32,945 ERROR [Thread[Thread-443,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-16T19:49:32,948 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ea41eaa{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-16T19:49:32,949 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c65d0ae{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T19:49:32,949 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T19:49:32,949 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@4ce19e11{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-16T19:49:32,949 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26c1ae3a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.log.dir/,STOPPED} 2024-12-16T19:49:32,950 ERROR [Thread[Thread-402,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-16T19:49:32,951 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2864): Mini mapreduce cluster stopped 2024-12-16T19:49:32,951 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-16T19:49:32,951 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-16T19:49:32,951 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3301b7a5 to 127.0.0.1:62515 2024-12-16T19:49:32,951 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:49:32,951 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-16T19:49:32,951 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=258251649, stopped=false 2024-12-16T19:49:32,952 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:49:32,952 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-16T19:49:32,952 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=7a6e4bf70377,45883,1734378235170 2024-12-16T19:49:32,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-16T19:49:32,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-16T19:49:32,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-16T19:49:32,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-16T19:49:32,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:49:32,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:49:32,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:49:32,993 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:49:32,993 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-16T19:49:32,993 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T19:49:32,993 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T19:49:32,993 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:49:32,993 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T19:49:32,993 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-16T19:49:32,994 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7a6e4bf70377,45217,1734378236605' ***** 2024-12-16T19:49:32,994 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:49:32,994 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-16T19:49:32,994 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7a6e4bf70377,42741,1734378236851' ***** 2024-12-16T19:49:32,994 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:49:32,994 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-16T19:49:32,994 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7a6e4bf70377,42861,1734378237024' ***** 2024-12-16T19:49:32,994 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:49:32,994 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-16T19:49:32,994 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-16T19:49:32,994 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-16T19:49:32,994 INFO [RS:1;7a6e4bf70377:42741 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-12-16T19:49:32,995 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-16T19:49:32,995 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-16T19:49:32,995 INFO [RS:1;7a6e4bf70377:42741 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-16T19:49:32,995 INFO [RS:0;7a6e4bf70377:45217 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-16T19:49:32,995 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-16T19:49:32,995 INFO [RS:0;7a6e4bf70377:45217 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-16T19:49:32,995 INFO [RS:2;7a6e4bf70377:42861 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-16T19:49:32,995 INFO [RS:2;7a6e4bf70377:42861 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-16T19:49:32,995 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-16T19:49:32,995 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(3579): Received CLOSE for 3e93dad405937162271b1d330fba4cbb 2024-12-16T19:49:32,995 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(3579): Received CLOSE for 34c062451cb346b64f29e9d2d97ae5fd 2024-12-16T19:49:32,995 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(3579): Received CLOSE for 3893a032022fb38e1a88fc4547193141 2024-12-16T19:49:32,995 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(1224): stopping server 7a6e4bf70377,42861,1734378237024 2024-12-16T19:49:32,995 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(1224): stopping server 7a6e4bf70377,42741,1734378236851 2024-12-16T19:49:32,995 DEBUG [RS:2;7a6e4bf70377:42861 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:49:32,995 DEBUG [RS:1;7a6e4bf70377:42741 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:49:32,995 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(3579): Received CLOSE for 9607b295cff8a6b815204957546cba58 2024-12-16T19:49:32,995 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(1224): stopping server 7a6e4bf70377,45217,1734378236605 2024-12-16T19:49:32,995 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-16T19:49:32,995 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-16T19:49:32,995 DEBUG [RS:0;7a6e4bf70377:45217 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:49:32,995 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-16T19:49:32,995 DEBUG [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(1603): Online Regions={34c062451cb346b64f29e9d2d97ae5fd=testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd.} 2024-12-16T19:49:32,995 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-16T19:49:32,995 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-16T19:49:32,995 DEBUG [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(1603): Online Regions={3e93dad405937162271b1d330fba4cbb=hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb., 9607b295cff8a6b815204957546cba58=testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58.} 2024-12-16T19:49:32,995 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-16T19:49:32,996 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 3893a032022fb38e1a88fc4547193141, disabling compactions & flushes 2024-12-16T19:49:32,996 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 34c062451cb346b64f29e9d2d97ae5fd, disabling compactions & flushes 2024-12-16T19:49:32,996 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 3e93dad405937162271b1d330fba4cbb, disabling compactions & flushes 2024-12-16T19:49:32,996 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb. 2024-12-16T19:49:32,996 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141. 2024-12-16T19:49:32,996 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. 2024-12-16T19:49:32,996 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb. 2024-12-16T19:49:32,996 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141. 2024-12-16T19:49:32,996 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. 2024-12-16T19:49:32,996 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb. after waiting 0 ms 2024-12-16T19:49:32,996 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141. after waiting 0 ms 2024-12-16T19:49:32,996 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. after waiting 0 ms 2024-12-16T19:49:32,996 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb. 
2024-12-16T19:49:32,996 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141. 2024-12-16T19:49:32,996 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. 2024-12-16T19:49:32,996 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 3893a032022fb38e1a88fc4547193141 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-16T19:49:32,996 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 3e93dad405937162271b1d330fba4cbb 1/1 column families, dataSize=1.38 KB heapSize=3.33 KB 2024-12-16T19:49:32,998 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-16T19:49:32,998 DEBUG [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 3893a032022fb38e1a88fc4547193141=hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141.} 2024-12-16T19:49:32,998 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-16T19:49:32,998 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-16T19:49:32,998 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-16T19:49:32,998 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-16T19:49:32,998 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-16T19:49:32,998 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=68.66 KB heapSize=109 KB 2024-12-16T19:49:32,999 DEBUG [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(1629): Waiting on 34c062451cb346b64f29e9d2d97ae5fd 2024-12-16T19:49:32,999 DEBUG [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 3893a032022fb38e1a88fc4547193141 2024-12-16T19:49:32,999 DEBUG [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(1629): Waiting on 3e93dad405937162271b1d330fba4cbb, 9607b295cff8a6b815204957546cba58 2024-12-16T19:49:33,006 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/34c062451cb346b64f29e9d2d97ae5fd/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-16T19:49:33,006 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:49:33,006 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. 2024-12-16T19:49:33,006 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 34c062451cb346b64f29e9d2d97ae5fd: 2024-12-16T19:49:33,007 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd. 2024-12-16T19:49:33,012 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/namespace/3893a032022fb38e1a88fc4547193141/.tmp/info/a4f10b834ac24084b635140538b480ed is 45, key is default/info:d/1734378243245/Put/seqid=0 2024-12-16T19:49:33,015 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/acl/3e93dad405937162271b1d330fba4cbb/.tmp/l/0ba95e0faa084f6698d13ac1a5482484 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1734378424337/DeleteFamily/seqid=0 2024-12-16T19:49:33,026 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740/.tmp/info/3cd8fc387b9248b8a448642b233cb7d4 is 173, key is testExportExpiredSnapshot,1,1734378427130.34c062451cb346b64f29e9d2d97ae5fd./info:regioninfo/1734378427568/Put/seqid=0 2024-12-16T19:49:33,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742431_1607 (size=5037) 2024-12-16T19:49:33,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742431_1607 (size=5037) 2024-12-16T19:49:33,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742431_1607 (size=5037) 2024-12-16T19:49:33,036 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/namespace/3893a032022fb38e1a88fc4547193141/.tmp/info/a4f10b834ac24084b635140538b480ed 2024-12-16T19:49:33,037 INFO [regionserver/7a6e4bf70377:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-16T19:49:33,037 INFO [regionserver/7a6e4bf70377:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-16T19:49:33,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742432_1608 (size=5695) 2024-12-16T19:49:33,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742432_1608 (size=5695) 2024-12-16T19:49:33,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742432_1608 (size=5695) 
2024-12-16T19:49:33,047 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.38 KB at sequenceid=27 (bloomFilter=false), to=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/acl/3e93dad405937162271b1d330fba4cbb/.tmp/l/0ba95e0faa084f6698d13ac1a5482484 2024-12-16T19:49:33,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742433_1609 (size=15630) 2024-12-16T19:49:33,049 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/namespace/3893a032022fb38e1a88fc4547193141/.tmp/info/a4f10b834ac24084b635140538b480ed as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/namespace/3893a032022fb38e1a88fc4547193141/info/a4f10b834ac24084b635140538b480ed 2024-12-16T19:49:33,050 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.26 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740/.tmp/info/3cd8fc387b9248b8a448642b233cb7d4 2024-12-16T19:49:33,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742433_1609 (size=15630) 2024-12-16T19:49:33,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742433_1609 (size=15630) 2024-12-16T19:49:33,060 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0ba95e0faa084f6698d13ac1a5482484 2024-12-16T19:49:33,061 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/acl/3e93dad405937162271b1d330fba4cbb/.tmp/l/0ba95e0faa084f6698d13ac1a5482484 as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/acl/3e93dad405937162271b1d330fba4cbb/l/0ba95e0faa084f6698d13ac1a5482484 2024-12-16T19:49:33,069 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/namespace/3893a032022fb38e1a88fc4547193141/info/a4f10b834ac24084b635140538b480ed, entries=2, sequenceid=6, filesize=4.9 K 2024-12-16T19:49:33,071 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0ba95e0faa084f6698d13ac1a5482484 2024-12-16T19:49:33,071 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/acl/3e93dad405937162271b1d330fba4cbb/l/0ba95e0faa084f6698d13ac1a5482484, entries=12, sequenceid=27, filesize=5.6 K 2024-12-16T19:49:33,073 DEBUG 
[RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740/.tmp/rep_barrier/76ed6ec82810411e99ffccc1286f58d2 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b./rep_barrier:/1734378424352/DeleteFamily/seqid=0 2024-12-16T19:49:33,074 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 3893a032022fb38e1a88fc4547193141 in 78ms, sequenceid=6, compaction requested=false 2024-12-16T19:49:33,074 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 3e93dad405937162271b1d330fba4cbb in 78ms, sequenceid=27, compaction requested=false 2024-12-16T19:49:33,075 INFO [regionserver/7a6e4bf70377:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-16T19:49:33,082 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/namespace/3893a032022fb38e1a88fc4547193141/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-16T19:49:33,083 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:49:33,083 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141. 2024-12-16T19:49:33,083 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 3893a032022fb38e1a88fc4547193141: 2024-12-16T19:49:33,083 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734378241894.3893a032022fb38e1a88fc4547193141. 2024-12-16T19:49:33,086 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/acl/3e93dad405937162271b1d330fba4cbb/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1 2024-12-16T19:49:33,087 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:49:33,087 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb. 2024-12-16T19:49:33,087 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 3e93dad405937162271b1d330fba4cbb: 2024-12-16T19:49:33,087 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1734378243460.3e93dad405937162271b1d330fba4cbb. 
2024-12-16T19:49:33,087 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 9607b295cff8a6b815204957546cba58, disabling compactions & flushes 2024-12-16T19:49:33,087 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. 2024-12-16T19:49:33,087 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. 2024-12-16T19:49:33,087 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. after waiting 0 ms 2024-12-16T19:49:33,087 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. 2024-12-16T19:49:33,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742434_1610 (size=8007) 2024-12-16T19:49:33,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742434_1610 (size=8007) 2024-12-16T19:49:33,091 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.34 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740/.tmp/rep_barrier/76ed6ec82810411e99ffccc1286f58d2 2024-12-16T19:49:33,092 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/default/testExportExpiredSnapshot/9607b295cff8a6b815204957546cba58/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-16T19:49:33,094 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:49:33,094 INFO [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. 2024-12-16T19:49:33,094 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 9607b295cff8a6b815204957546cba58: 2024-12-16T19:49:33,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742434_1610 (size=8007) 2024-12-16T19:49:33,094 DEBUG [RS_CLOSE_REGION-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1734378427130.9607b295cff8a6b815204957546cba58. 
2024-12-16T19:49:33,122 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740/.tmp/table/f931d77737b54f16bd8c933a2a4cdabd is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1734378406197.4e505bd67acb2881a003b7511655184b./table:/1734378424352/DeleteFamily/seqid=0 2024-12-16T19:49:33,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073742435_1611 (size=8861) 2024-12-16T19:49:33,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073742435_1611 (size=8861) 2024-12-16T19:49:33,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073742435_1611 (size=8861) 2024-12-16T19:49:33,137 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.06 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740/.tmp/table/f931d77737b54f16bd8c933a2a4cdabd 2024-12-16T19:49:33,142 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740/.tmp/info/3cd8fc387b9248b8a448642b233cb7d4 as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740/info/3cd8fc387b9248b8a448642b233cb7d4 2024-12-16T19:49:33,147 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740/info/3cd8fc387b9248b8a448642b233cb7d4, entries=84, sequenceid=202, filesize=15.3 K 2024-12-16T19:49:33,148 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740/.tmp/rep_barrier/76ed6ec82810411e99ffccc1286f58d2 as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740/rep_barrier/76ed6ec82810411e99ffccc1286f58d2 2024-12-16T19:49:33,158 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740/rep_barrier/76ed6ec82810411e99ffccc1286f58d2, entries=21, sequenceid=202, filesize=7.8 K 2024-12-16T19:49:33,159 INFO [regionserver/7a6e4bf70377:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-16T19:49:33,159 INFO [regionserver/7a6e4bf70377:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-16T19:49:33,163 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740/.tmp/table/f931d77737b54f16bd8c933a2a4cdabd as hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740/table/f931d77737b54f16bd8c933a2a4cdabd 2024-12-16T19:49:33,175 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740/table/f931d77737b54f16bd8c933a2a4cdabd, entries=38, sequenceid=202, filesize=8.7 K 2024-12-16T19:49:33,179 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~68.66 KB/70312, heapSize ~108.95 KB/111568, currentSize=0 B/0 for 1588230740 in 181ms, sequenceid=202, compaction requested=false 2024-12-16T19:49:33,189 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/data/hbase/meta/1588230740/recovered.edits/205.seqid, newMaxSeqId=205, maxSeqId=1 2024-12-16T19:49:33,190 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:49:33,190 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-16T19:49:33,190 INFO [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-16T19:49:33,190 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-16T19:49:33,190 DEBUG [RS_CLOSE_META-regionserver/7a6e4bf70377:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-16T19:49:33,199 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(1250): stopping server 7a6e4bf70377,42861,1734378237024; all regions closed. 2024-12-16T19:49:33,199 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(1250): stopping server 7a6e4bf70377,45217,1734378236605; all regions closed. 2024-12-16T19:49:33,199 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(1250): stopping server 7a6e4bf70377,42741,1734378236851; all regions closed. 
2024-12-16T19:49:33,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741834_1010 (size=13175) 2024-12-16T19:49:33,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741833_1009 (size=11760) 2024-12-16T19:49:33,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741834_1010 (size=13175) 2024-12-16T19:49:33,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741833_1009 (size=11760) 2024-12-16T19:49:33,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741834_1010 (size=13175) 2024-12-16T19:49:33,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741833_1009 (size=11760) 2024-12-16T19:49:33,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741836_1012 (size=80694) 2024-12-16T19:49:33,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741836_1012 (size=80694) 2024-12-16T19:49:33,207 DEBUG [RS:0;7a6e4bf70377:45217 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/oldWALs 2024-12-16T19:49:33,207 INFO [RS:0;7a6e4bf70377:45217 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 7a6e4bf70377%2C45217%2C1734378236605:(num 1734378240344) 2024-12-16T19:49:33,207 DEBUG [RS:2;7a6e4bf70377:42861 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/oldWALs 2024-12-16T19:49:33,207 INFO [RS:2;7a6e4bf70377:42861 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 7a6e4bf70377%2C42861%2C1734378237024:(num 1734378240343) 2024-12-16T19:49:33,207 DEBUG [RS:0;7a6e4bf70377:45217 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:49:33,207 DEBUG [RS:2;7a6e4bf70377:42861 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:49:33,207 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.LeaseManager(133): Closed leases 2024-12-16T19:49:33,207 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.LeaseManager(133): Closed leases 2024-12-16T19:49:33,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741836_1012 (size=80694) 2024-12-16T19:49:33,207 INFO [RS:0;7a6e4bf70377:45217 {}] hbase.ChoreService(370): Chore service for: regionserver/7a6e4bf70377:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-12-16T19:49:33,208 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-16T19:49:33,208 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-16T19:49:33,208 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-16T19:49:33,208 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-16T19:49:33,208 INFO [RS:2;7a6e4bf70377:42861 {}] hbase.ChoreService(370): Chore service for: regionserver/7a6e4bf70377:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-16T19:49:33,208 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-16T19:49:33,208 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-16T19:49:33,208 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-16T19:49:33,208 INFO [RS:0;7a6e4bf70377:45217 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45217 2024-12-16T19:49:33,208 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-16T19:49:33,209 INFO [RS:2;7a6e4bf70377:42861 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42861 2024-12-16T19:49:33,210 DEBUG [RS:1;7a6e4bf70377:42741 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/oldWALs 2024-12-16T19:49:33,210 INFO [RS:1;7a6e4bf70377:42741 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 7a6e4bf70377%2C42741%2C1734378236851.meta:.meta(num 1734378241145) 2024-12-16T19:49:33,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43893 is added to blk_1073741835_1011 (size=13339) 2024-12-16T19:49:33,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40033 is added to blk_1073741835_1011 (size=13339) 2024-12-16T19:49:33,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39241 is added to blk_1073741835_1011 (size=13339) 2024-12-16T19:49:33,216 DEBUG [RS:1;7a6e4bf70377:42741 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/oldWALs 2024-12-16T19:49:33,216 INFO [RS:1;7a6e4bf70377:42741 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 7a6e4bf70377%2C42741%2C1734378236851:(num 1734378240371) 2024-12-16T19:49:33,216 DEBUG [RS:1;7a6e4bf70377:42741 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:49:33,216 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.LeaseManager(133): Closed leases 2024-12-16T19:49:33,216 INFO [RS:1;7a6e4bf70377:42741 {}] hbase.ChoreService(370): Chore service for: regionserver/7a6e4bf70377:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-16T19:49:33,217 INFO [regionserver/7a6e4bf70377:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-16T19:49:33,217 INFO [RS:1;7a6e4bf70377:42741 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42741 2024-12-16T19:49:33,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-16T19:49:33,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7a6e4bf70377,42861,1734378237024 2024-12-16T19:49:33,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7a6e4bf70377,45217,1734378236605 2024-12-16T19:49:33,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7a6e4bf70377,42741,1734378236851 2024-12-16T19:49:33,243 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7a6e4bf70377,45217,1734378236605] 2024-12-16T19:49:33,243 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 7a6e4bf70377,45217,1734378236605; numProcessing=1 2024-12-16T19:49:33,259 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/7a6e4bf70377,45217,1734378236605 already deleted, retry=false 2024-12-16T19:49:33,259 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 7a6e4bf70377,45217,1734378236605 expired; onlineServers=2 2024-12-16T19:49:33,259 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7a6e4bf70377,42861,1734378237024] 2024-12-16T19:49:33,259 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 7a6e4bf70377,42861,1734378237024; numProcessing=2 2024-12-16T19:49:33,267 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/7a6e4bf70377,42861,1734378237024 already deleted, retry=false 2024-12-16T19:49:33,267 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 7a6e4bf70377,42861,1734378237024 expired; onlineServers=1 2024-12-16T19:49:33,267 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7a6e4bf70377,42741,1734378236851] 2024-12-16T19:49:33,268 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 7a6e4bf70377,42741,1734378236851; numProcessing=3 2024-12-16T19:49:33,276 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/7a6e4bf70377,42741,1734378236851 already deleted, retry=false 2024-12-16T19:49:33,276 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 7a6e4bf70377,42741,1734378236851 expired; onlineServers=0 2024-12-16T19:49:33,276 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '7a6e4bf70377,45883,1734378235170' ***** 2024-12-16T19:49:33,276 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-16T19:49:33,276 DEBUG 
[M:0;7a6e4bf70377:45883 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7341e092, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7a6e4bf70377/172.17.0.2:0 2024-12-16T19:49:33,276 INFO [M:0;7a6e4bf70377:45883 {}] regionserver.HRegionServer(1224): stopping server 7a6e4bf70377,45883,1734378235170 2024-12-16T19:49:33,276 INFO [M:0;7a6e4bf70377:45883 {}] regionserver.HRegionServer(1250): stopping server 7a6e4bf70377,45883,1734378235170; all regions closed. 2024-12-16T19:49:33,276 DEBUG [M:0;7a6e4bf70377:45883 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-16T19:49:33,277 DEBUG [M:0;7a6e4bf70377:45883 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-16T19:49:33,277 DEBUG [M:0;7a6e4bf70377:45883 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-16T19:49:33,277 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-16T19:49:33,277 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734378239809 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.small.0-1734378239809,5,FailOnTimeoutGroup] 2024-12-16T19:49:33,277 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734378239804 {}] cleaner.HFileCleaner(306): Exit Thread[master/7a6e4bf70377:0:becomeActiveMaster-HFileCleaner.large.0-1734378239804,5,FailOnTimeoutGroup] 2024-12-16T19:49:33,277 INFO [M:0;7a6e4bf70377:45883 {}] hbase.ChoreService(370): Chore service for: master/7a6e4bf70377:0 had [] on shutdown 2024-12-16T19:49:33,277 DEBUG [M:0;7a6e4bf70377:45883 {}] master.HMaster(1733): Stopping service threads 2024-12-16T19:49:33,278 INFO [M:0;7a6e4bf70377:45883 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-16T19:49:33,278 INFO [M:0;7a6e4bf70377:45883 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-16T19:49:33,278 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-16T19:49:33,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-16T19:49:33,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-16T19:49:33,284 DEBUG [M:0;7a6e4bf70377:45883 {}] zookeeper.ZKUtil(347): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-16T19:49:33,285 WARN [M:0;7a6e4bf70377:45883 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-16T19:49:33,285 INFO [M:0;7a6e4bf70377:45883 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-16T19:49:33,285 INFO [M:0;7a6e4bf70377:45883 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-16T19:49:33,285 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-16T19:49:33,285 DEBUG [M:0;7a6e4bf70377:45883 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-16T19:49:33,302 INFO [M:0;7a6e4bf70377:45883 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T19:49:33,302 DEBUG [M:0;7a6e4bf70377:45883 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T19:49:33,302 DEBUG [M:0;7a6e4bf70377:45883 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-16T19:49:33,302 DEBUG [M:0;7a6e4bf70377:45883 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-16T19:49:33,302 INFO [M:0;7a6e4bf70377:45883 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=811.66 KB heapSize=973.69 KB 2024-12-16T19:49:33,302 ERROR [AsyncFSWAL-0-hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData-prefix:7a6e4bf70377,45883,1734378235170 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData-prefix:7a6e4bf70377,45883,1734378235170,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:419) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:132) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:830) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:128) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1148) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.appendAndSync(AsyncFSWAL.java:500) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.consume(AsyncFSWAL.java:603) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T19:49:33,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T19:49:33,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T19:49:33,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42861-0x100e481a0950003, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T19:49:33,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45217-0x100e481a0950001, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T19:49:33,343 INFO [RS:0;7a6e4bf70377:45217 {}] regionserver.HRegionServer(1307): Exiting; stopping=7a6e4bf70377,45217,1734378236605; zookeeper connection closed. 2024-12-16T19:49:33,343 INFO [RS:2;7a6e4bf70377:42861 {}] regionserver.HRegionServer(1307): Exiting; stopping=7a6e4bf70377,42861,1734378237024; zookeeper connection closed. 2024-12-16T19:49:33,343 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2d8e023 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2d8e023 2024-12-16T19:49:33,343 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5cbdcca {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5cbdcca 2024-12-16T19:49:33,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T19:49:33,351 INFO [RS:1;7a6e4bf70377:42741 {}] regionserver.HRegionServer(1307): Exiting; stopping=7a6e4bf70377,42741,1734378236851; zookeeper connection closed. 
2024-12-16T19:49:33,351 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42741-0x100e481a0950002, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T19:49:33,351 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2a605dd0 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2a605dd0 2024-12-16T19:49:33,352 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-16T19:49:36,039 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:49:36,039 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-16T19:49:36,039 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-16T19:49:36,040 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-16T19:49:36,041 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-16T19:49:36,041 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-16T19:49:36,042 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:49:36,042 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-16T19:49:36,042 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-16T19:49:38,527 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-16T19:49:54,257 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-16T19:50:24,257 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;7a6e4bf70377:45883 225 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 2 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 17 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@3e41e187 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 12 Waited count: 16 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@536e0147 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4057 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.CountDownLatch$Sync@7e453a1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12657 Waited count: 13311 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 11 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@31a60107 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@12614a4d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@20ad794b): State: TIMED_WAITING Blocked count: 0 Waited count: 805 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 81 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1056518703-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1056518703-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1056518703-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1056518703-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1056518703-41-acceptor-0@72a1dc6d-ServerConnector@69dc1403{HTTP/1.1, (http/1.1)}{localhost:34747}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1056518703-42): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1056518703-43): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1056518703-44): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5c96b88a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 27 Waited count: 3222 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74f576f1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 38709): State: TIMED_WAITING Blocked count: 1 Waited 
count: 42 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 81 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@2fb21527): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 134 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@a039f09): State: TIMED_WAITING Blocked count: 0 Waited count: 81 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 135 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 38941 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1393 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47396ff Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 38709): State: TIMED_WAITING Blocked count: 122 Waited count: 2367 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 38709): State: TIMED_WAITING Blocked count: 121 Waited count: 2371 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 38709): State: TIMED_WAITING Blocked count: 73 Waited count: 2377 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 38709): State: TIMED_WAITING Blocked count: 99 Waited count: 2372 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 38709): State: TIMED_WAITING Blocked count: 95 Waited count: 2378 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2b9fd904): State: TIMED_WAITING Blocked count: 0 Waited count: 201 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@365338c3): State: TIMED_WAITING Blocked count: 0 Waited count: 81 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@5be1a34a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@22ba5cc1): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1987425868)): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1763236781-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1763236781-88-acceptor-0@28c57ec0-ServerConnector@4b4937c3{HTTP/1.1, (http/1.1)}{localhost:45121}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1763236781-89): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1763236781-90): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-3fea6ec7-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@e26d63): State: TIMED_WAITING Blocked count: 0 Waited count: 802 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 36193): State: TIMED_WAITING Blocked count: 1 Waited count: 42 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 81 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 230 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b68bd3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 1366 Waited count: 1523 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@781af2e0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 415 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 401 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 406 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 411 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 403 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp360438187-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp360438187-122-acceptor-0@46f20d0c-ServerConnector@17bf7c8b{HTTP/1.1, (http/1.1)}{localhost:33277}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp360438187-123): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 113 (IPC Client (1422999152) connection to localhost/127.0.0.1:38709 from jenkins): State: TIMED_WAITING Blocked count: 1383 Waited count: 1384 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 124 (qtp360438187-124): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 0 Waited count: 1968 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-bfbca57-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6e7c0da6): State: TIMED_WAITING Blocked count: 0 Waited count: 801 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44575): State: TIMED_WAITING Blocked count: 1 Waited count: 42 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 81 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 262 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@99154bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 1389 Waited count: 1534 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3dc3e52b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 403 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 403 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 403 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 402 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 402 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp767364239-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp767364239-154-acceptor-0@e36c42f-ServerConnector@644aedae{HTTP/1.1, (http/1.1)}{localhost:41355}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp767364239-155): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp767364239-156): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-902628f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1f93de06): State: TIMED_WAITING Blocked count: 0 Waited count: 800 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 40185): State: TIMED_WAITING Blocked count: 1 Waited count: 42 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 81 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 279 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18da6b11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 1356 Waited count: 1525 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2c0c9066): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 411 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 403 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 408 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 402 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 403 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data1)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data2)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 191 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data2/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data1/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 195 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 203 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data3/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (java.util.concurrent.ThreadPoolExecutor$Worker@13cf618e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data4/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@677b2ec3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data5/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data6/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (java.util.concurrent.ThreadPoolExecutor$Worker@42b019c5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 
(FsDatasetAsyncDiskServiceFixer):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 14
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599)
Thread 236 (NIOServerCxnFactory.SelectorThread-0):
  State: RUNNABLE
  Blocked count: 2
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368)
Thread 237 (NIOServerCxnFactory.SelectorThread-1):
  State: RUNNABLE
  Blocked count: 7
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368)
Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62515):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181)
Thread 235 (ConnnectionExpirer):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 40
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554)
Thread 239 (SessionTracker):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 200
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163)
Thread 240 (SyncThread:0):
  State: WAITING
  Blocked count: 20
  Waited count: 719
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6134f26a
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170)
Thread 241 (ProcessThread(sid:0 cport:62515):):
  State: WAITING
  Blocked count: 1
  Waited count: 839
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41cebb9a
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142)
Thread 242 (RequestThrottler):
  State: WAITING
  Blocked count: 1
  Waited count: 862
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a9edb39
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147)
Thread 243 (NIOWorkerThread-1):
  State: WAITING
  Blocked count: 1
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 245 (LeaseRenewer:jenkins@localhost:38709):
  State: TIMED_WAITING
  Blocked count: 11
  Waited count: 414
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 254 (Time-limited test.named-queue-events-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37ec9f05
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 255 (HBase-Metrics2-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 316
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 256 (RS-EventLoopGroup-1-1):
  State: RUNNABLE
  Blocked count: 20
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 257 (Time-limited test-SendThread(127.0.0.1:62515)):
  State: RUNNABLE
  Blocked count: 49
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289)
Thread 258 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 3
  Waited count: 56
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f91b45a
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 259 (NIOWorkerThread-2):
  State: WAITING
  Blocked count: 4
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 260 (NIOWorkerThread-3):
  State: WAITING
  Blocked count: 4
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 261 (NIOWorkerThread-4):
  State: WAITING
  Blocked count: 2
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 262 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 33
  Waited count: 87
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@226e9b0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 263 (NIOWorkerThread-5):
  State: WAITING
  Blocked count: 2
  Waited count: 133
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 264 (NIOWorkerThread-6):
  State: WAITING
  Blocked count: 3
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 265 (NIOWorkerThread-7):
  State: WAITING
  Blocked count: 3
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 266 (NIOWorkerThread-8):
  State: WAITING
  Blocked count: 5
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 267 (NIOWorkerThread-9):
  State: WAITING
  Blocked count: 4
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 268 (NIOWorkerThread-10):
  State: WAITING
  Blocked count: 3
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 269 (NIOWorkerThread-11):
  State: WAITING
  Blocked count: 3
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 270 (NIOWorkerThread-12):
  State: WAITING
  Blocked count: 3
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 271 (NIOWorkerThread-13):
  State: WAITING
  Blocked count: 2
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 272 (NIOWorkerThread-14):
  State: WAITING
  Blocked count: 1
  Waited count: 132
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 273 (NIOWorkerThread-15):
  State: WAITING
  Blocked count: 2
  Waited count: 131
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 274 (NIOWorkerThread-16):
  State: WAITING
  Blocked count: 3
  Waited count: 131
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 275 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45883):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@51614a46
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 276 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883):
  State: WAITING
  Blocked count: 193
  Waited count: 811
  Waiting on java.util.concurrent.Semaphore$NonfairSync@5ff22cb
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 277 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883):
  State: WAITING
  Blocked count: 122
  Waited count: 472
  Waiting on java.util.concurrent.Semaphore$NonfairSync@175e754a
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 278 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45883):
  State: WAITING
  Blocked count: 44
  Waited count: 9172
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22006578
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 279 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45883):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79683804
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45883):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79683804
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=45883):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@57db498
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=45883):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@61ed6558
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45883):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@4067e7b8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 284 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=45883):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.Semaphore$NonfairSync@78774fa8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85)
Thread 288 (RS-EventLoopGroup-3-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 308 (RS-EventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 3
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 328 (RS-EventLoopGroup-5-1):
  State: RUNNABLE
  Blocked count: 39
  Waited count: 3
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 286 (M:0;7a6e4bf70377:45883):
  State: TIMED_WAITING
  Blocked count: 6
  Waited count: 3354
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
    app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879)
    app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$948/0x00007f17a8f1a6b8.run(Unknown Source)
    app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590)
    app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644)
    app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635)
    app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810)
    app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631)
    app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586)
    app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569)
Thread 349 (Monitor thread for TaskMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 40
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 351 (master/7a6e4bf70377:0:becomeActiveMaster-MemStoreChunkPool Statistics):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 353 (master/7a6e4bf70377:0:becomeActiveMaster-MemStoreChunkPool Statistics):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 355 (org.apache.hadoop.hdfs.PeerCache@5d0ed6dc):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 132
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253)
    app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46)
    app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 374 (master:store-WAL-Roller):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3939
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179)
Thread 391 (RS-EventLoopGroup-5-2):
  State: RUNNABLE
  Blocked count: 17
  Waited count: 5
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 392 (RS-EventLoopGroup-5-3):
  State: RUNNABLE
  Blocked count: 38
  Waited count: 4
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 405 (Idle-Rpc-Conn-Sweeper-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 60
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 416 (SnapshotHandlerChoreCleaner):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 40
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 404 (RpcClient-timer-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 39319
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 425 (RS-EventLoopGroup-1-2):
  State: RUNNABLE
  Blocked count: 25
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 426 (RS-EventLoopGroup-1-3):
  State: RUNNABLE
  Blocked count: 37
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 449 (RegionServerTracker-0):
  State: WAITING
  Blocked count: 7
  Waited count: 9
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46cd7caf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 472 (regionserver/7a6e4bf70377:0.procedureResultReporter):
  State: WAITING
  Blocked count: 8
  Waited count: 17
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@546aaa7c
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 475 (regionserver/7a6e4bf70377:0.procedureResultReporter):
  State: WAITING
  Blocked count: 18
  Waited count: 37
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44cfe7e0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 470 (regionserver/7a6e4bf70377:0.procedureResultReporter):
  State: WAITING
  Blocked count: 15
  Waited count: 31
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f32c6be
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 495 (LeaseRenewer:jenkins.hfs.0@localhost:38709):
  State: TIMED_WAITING
  Blocked count: 11
  Waited count: 412
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 506 (LeaseRenewer:jenkins.hfs.2@localhost:38709):
  State: TIMED_WAITING
  Blocked count: 11
  Waited count: 410
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 508 (LeaseRenewer:jenkins.hfs.1@localhost:38709):
  State: TIMED_WAITING
  Blocked count: 11
  Waited count: 410
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 513 (RS-EventLoopGroup-4-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 525 (RS-EventLoopGroup-4-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 527 (region-location-0):
  State: WAITING
  Blocked count: 8
  Waited count: 15
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 553 (Async-Client-Retry-Timer-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 38941
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 566 (RPCClient-NioEventLoopGroup-6-1):
  State: RUNNABLE
  Blocked count: 20
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 585 (ForkJoinPool.commonPool-worker-2):
  State: WAITING
  Blocked count: 0
  Waited count: 311
  Waiting on java.util.concurrent.ForkJoinPool@30f8c93
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623)
    java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165)
Thread 595 (RS-EventLoopGroup-3-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 603 (region-location-1):
  State: WAITING
  Blocked count: 5
  Waited count: 9
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 604 (region-location-2):
  State: WAITING
  Blocked count: 5
  Waited count: 9
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 605 (region-location-3):
  State: WAITING
  Blocked count: 4
  Waited count: 10
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1011 (MutableQuantiles-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 429
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1075 (RPCClient-NioEventLoopGroup-6-2):
  State: RUNNABLE
  Blocked count: 23
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1114 (RS-EventLoopGroup-3-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1119 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 62 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17aad65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1124 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1332 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 314 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1571 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@3c8f02ce Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1597 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 639 Waiting on java.util.concurrent.ForkJoinPool@30f8c93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2263 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2565 (region-location-4): State: WAITING Blocked count: 2 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5385 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5386 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5387 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9826 (AsyncFSWAL-1-hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData-prefix:7a6e4bf70377,45883,1734378235170): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@439b6ad8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9828 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-12-16T19:50:54,257 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-16T19:51:24,258 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;7a6e4bf70377:45883 220 active threads
Thread 1 (main): State: TIMED_WAITING Blocked count: 2 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 17 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@3e41e187 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method)
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 12 Waited count: 17 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 26 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@536e0147 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4656 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 47 Waiting on java.util.concurrent.CountDownLatch$Sync@bd9d57e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12657 Waited count: 13312 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) 
java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 11 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@31a60107 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@12614a4d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@20ad794b): State: TIMED_WAITING Blocked count: 0 Waited count: 925 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1056518703-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1056518703-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1056518703-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1056518703-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1056518703-41-acceptor-0@72a1dc6d-ServerConnector@69dc1403{HTTP/1.1, (http/1.1)}{localhost:34747}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1056518703-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1056518703-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1056518703-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5c96b88a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 27 Waited count: 3222 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74f576f1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 38709): State: TIMED_WAITING Blocked count: 1 Waited count: 48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@2fb21527): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 154 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@a039f09): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 155 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 44893 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1393 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47396ff Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 38709): State: TIMED_WAITING Blocked count: 122 Waited count: 2428 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 38709): State: TIMED_WAITING Blocked count: 121 Waited count: 2432 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 38709): State: TIMED_WAITING Blocked count: 73 Waited count: 2439 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 38709): State: TIMED_WAITING Blocked count: 99 Waited count: 2434 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 38709): State: TIMED_WAITING Blocked count: 95 Waited count: 2439 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): 
State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2b9fd904): State: TIMED_WAITING Blocked count: 0 Waited count: 231 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@365338c3): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@5be1a34a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@22ba5cc1): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1987425868)): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1763236781-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1763236781-88-acceptor-0@28c57ec0-ServerConnector@4b4937c3{HTTP/1.1, (http/1.1)}{localhost:45121}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1763236781-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1763236781-90): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-3fea6ec7-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@e26d63): State: TIMED_WAITING Blocked count: 0 Waited count: 922 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 36193): State: TIMED_WAITING Blocked count: 1 Waited count: 48 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 250 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b68bd3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 1386 Waited count: 1563 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@781af2e0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 486 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 461 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 466 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 478 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 463 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp360438187-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp360438187-122-acceptor-0@46f20d0c-ServerConnector@17bf7c8b{HTTP/1.1, (http/1.1)}{localhost:33277}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp360438187-123): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 113 (IPC Client (1422999152) connection to localhost/127.0.0.1:38709 from jenkins): State: TIMED_WAITING Blocked count: 1443 Waited count: 1444 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 124 (qtp360438187-124): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 0 Waited count: 2028 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-bfbca57-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6e7c0da6): State: TIMED_WAITING Blocked count: 0 Waited count: 921 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44575): State: TIMED_WAITING Blocked count: 1 Waited count: 48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 282 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@99154bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 1409 Waited count: 1574 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3dc3e52b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 463 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 463 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 463 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 462 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 462 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp767364239-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp767364239-154-acceptor-0@e36c42f-ServerConnector@644aedae{HTTP/1.1, (http/1.1)}{localhost:41355}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp767364239-155): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp767364239-156): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-902628f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1f93de06): State: TIMED_WAITING Blocked count: 0 Waited count: 920 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 40185): State: TIMED_WAITING Blocked count: 1 Waited count: 48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 299 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18da6b11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 1376 Waited count: 1565 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2c0c9066): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 471 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 463 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 468 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 462 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 463 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data1)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data2)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 191 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data2/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data1/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 195 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 203 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data3/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (java.util.concurrent.ThreadPoolExecutor$Worker@13cf618e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data4/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@677b2ec3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data5/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data6/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (java.util.concurrent.ThreadPoolExecutor$Worker@42b019c5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62515): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 230 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 20 Waited count: 723 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6134f26a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:62515):): State: WAITING Blocked count: 1 Waited count: 843 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41cebb9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 866 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a9edb39 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 133 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37ec9f05 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 344 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 20 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:62515)): State: RUNNABLE Blocked count: 49 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 56 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f91b45a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 
(NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 33 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@226e9b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@51614a46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883): State: WAITING Blocked count: 193 Waited count: 811 Waiting on java.util.concurrent.Semaphore$NonfairSync@5ff22cb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883): State: WAITING Blocked count: 122 Waited count: 472 Waiting on java.util.concurrent.Semaphore$NonfairSync@175e754a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45883): State: WAITING Blocked count: 44 Waited count: 9172 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22006578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79683804 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79683804 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@57db498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@61ed6558 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4067e7b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@78774fa8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 288 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 308 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 328 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 39 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;7a6e4bf70377:45883): State: TIMED_WAITING Blocked count: 6 Waited count: 3354 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$948/0x00007f17a8f1a6b8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) 
app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 349 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 351 (master/7a6e4bf70377:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/7a6e4bf70377:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (org.apache.hadoop.hdfs.PeerCache@5d0ed6dc): State: TIMED_WAITING Blocked count: 0 Waited count: 152 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 374 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4539 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 391 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 17 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 392 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 38 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 65 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27e6fa73 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 45321 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 25 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 37 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 449 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46cd7caf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (regionserver/7a6e4bf70377:0.procedureResultReporter): State: WAITING Blocked count: 8 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@546aaa7c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/7a6e4bf70377:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44cfe7e0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 (regionserver/7a6e4bf70377:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f32c6be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 513 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (region-location-0): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 553 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 44943 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 566 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 20 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 585 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 312 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 595 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 603 (region-location-1): State: WAITING Blocked count: 5 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 604 (region-location-2): State: WAITING Blocked count: 5 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 605 (region-location-3): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1011 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 435 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1075 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1114 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1119 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 62 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17aad65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1124 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1571 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@3c8f02ce Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1597 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 639 Waiting on java.util.concurrent.ForkJoinPool@30f8c93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2263 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2565 (region-location-4): State: WAITING Blocked count: 2 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5385 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5386 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited 
count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5387 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9826 (AsyncFSWAL-1-hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData-prefix:7a6e4bf70377,45883,1734378235170): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@439b6ad8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9828 (Timer for 'JobHistoryServer' 
metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-12-16T19:51:54,258 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
2024-12-16T19:52:24,258 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;7a6e4bf70377:45883 219 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 2 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 17 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@3e41e187 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 12 Waited count: 18 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140)
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@536e0147 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5256 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 53 Waiting on java.util.concurrent.CountDownLatch$Sync@4f842fb9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12657 Waited count: 13313 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 
(org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 11 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@31a60107 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@12614a4d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@20ad794b): State: TIMED_WAITING Blocked count: 0 Waited count: 1045 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1056518703-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1056518703-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1056518703-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1056518703-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1056518703-41-acceptor-0@72a1dc6d-ServerConnector@69dc1403{HTTP/1.1, (http/1.1)}{localhost:34747}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1056518703-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1056518703-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1056518703-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5c96b88a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 27 Waited count: 3222 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74f576f1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) 
app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 38709): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@2fb21527): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 174 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@a039f09): State: TIMED_WAITING Blocked count: 0 
Waited count: 105 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 175 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 50842 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1393 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47396ff Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 38709): State: TIMED_WAITING Blocked count: 122 Waited count: 2489 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 38709): State: TIMED_WAITING Blocked count: 121 Waited count: 2493 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 38709): State: TIMED_WAITING Blocked count: 73 Waited count: 2500 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 38709): State: TIMED_WAITING Blocked count: 99 Waited count: 2495 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 38709): State: TIMED_WAITING Blocked count: 95 Waited count: 2500 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2b9fd904): State: TIMED_WAITING Blocked count: 0 Waited count: 261 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@365338c3): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@5be1a34a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@22ba5cc1): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1987425868)): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1763236781-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1763236781-88-acceptor-0@28c57ec0-ServerConnector@4b4937c3{HTTP/1.1, (http/1.1)}{localhost:45121}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1763236781-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1763236781-90): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-3fea6ec7-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@e26d63): State: TIMED_WAITING Blocked count: 0 Waited count: 1042 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 36193): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 270 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b68bd3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 1406 Waited count: 1603 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@781af2e0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) 
java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 549 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 521 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 526 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 542 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 523 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp360438187-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp360438187-122-acceptor-0@46f20d0c-ServerConnector@17bf7c8b{HTTP/1.1, (http/1.1)}{localhost:33277}): State: RUNNABLE 
Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp360438187-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 113 (IPC Client (1422999152) connection to localhost/127.0.0.1:38709 from jenkins): State: TIMED_WAITING Blocked count: 1503 Waited count: 1504 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 124 (qtp360438187-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 0 Waited count: 2088 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-bfbca57-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6e7c0da6): State: TIMED_WAITING Blocked count: 0 Waited count: 1041 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44575): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 302 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@99154bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 1429 Waited count: 1614 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3dc3e52b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 523 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 523 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 523 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 522 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 522 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp767364239-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp767364239-154-acceptor-0@e36c42f-ServerConnector@644aedae{HTTP/1.1, (http/1.1)}{localhost:41355}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp767364239-155): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp767364239-156): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-902628f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1f93de06): State: TIMED_WAITING Blocked count: 0 Waited count: 1040 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 40185): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 319 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18da6b11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 1396 Waited count: 1605 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2c0c9066): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 531 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 523 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 528 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 522 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 523 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data1)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data2)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 191 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data2/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data1/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 195 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 203 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data3/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (java.util.concurrent.ThreadPoolExecutor$Worker@13cf618e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data4/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@677b2ec3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data5/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data6/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 
(java.util.concurrent.ThreadPoolExecutor$Worker@42b019c5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62515): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 260 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 20 Waited count: 728 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6134f26a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:62515):): State: WAITING Blocked count: 1 Waited count: 848 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41cebb9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 871 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a9edb39 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37ec9f05 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 372 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 20 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:62515)): State: RUNNABLE Blocked count: 49 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 56 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f91b45a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 33 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@226e9b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 133 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 
(RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@51614a46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883): State: WAITING Blocked count: 193 Waited count: 811 Waiting on java.util.concurrent.Semaphore$NonfairSync@5ff22cb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883): State: WAITING Blocked count: 122 Waited count: 472 Waiting on java.util.concurrent.Semaphore$NonfairSync@175e754a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45883): State: WAITING Blocked count: 44 Waited count: 9172 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22006578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 
(RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79683804 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79683804 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@57db498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@61ed6558 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4067e7b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@78774fa8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 288 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 308 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 328 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 39 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;7a6e4bf70377:45883): State: TIMED_WAITING Blocked count: 6 Waited count: 3354 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$948/0x00007f17a8f1a6b8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 349 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 351 (master/7a6e4bf70377:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/7a6e4bf70377:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (org.apache.hadoop.hdfs.PeerCache@5d0ed6dc): State: TIMED_WAITING Blocked count: 0 Waited count: 172 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 374 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5138 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 391 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 17 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 392 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 38 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 65 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27e6fa73 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51322 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 25 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 37 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 449 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46cd7caf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (regionserver/7a6e4bf70377:0.procedureResultReporter): State: WAITING Blocked count: 8 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@546aaa7c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/7a6e4bf70377:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44cfe7e0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 (regionserver/7a6e4bf70377:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f32c6be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 513 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (region-location-0): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 553 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 50944 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 566 
(RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 20 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 603 (region-location-1): State: WAITING Blocked count: 5 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 604 (region-location-2): State: WAITING Blocked count: 5 Waited count: 9 
Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 605 (region-location-3): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1011 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 441 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1075 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1114 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1119 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 62 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17aad65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1124 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE 
Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1571 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@3c8f02ce Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1597 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 640 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2263 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2565 (region-location-4): State: WAITING Blocked count: 2 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5385 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5386 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5387 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9826 (AsyncFSWAL-1-hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData-prefix:7a6e4bf70377,45883,1734378235170): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@439b6ad8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9828 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-16T19:52:54,259 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-16T19:53:24,259 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;7a6e4bf70377:45883 218 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 2 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 17 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@3e41e187 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 12 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 32 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@536e0147 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5855 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 59 Waiting on java.util.concurrent.CountDownLatch$Sync@5751413b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12657 Waited count: 13314 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 11 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@31a60107 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@12614a4d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@20ad794b): State: TIMED_WAITING Blocked count: 0 Waited count: 1165 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1056518703-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1056518703-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1056518703-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1056518703-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1056518703-41-acceptor-0@72a1dc6d-ServerConnector@69dc1403{HTTP/1.1, (http/1.1)}{localhost:34747}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1056518703-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1056518703-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1056518703-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5c96b88a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 27 Waited count: 3222 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74f576f1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 38709): State: TIMED_WAITING Blocked count: 1 Waited 
count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@2fb21527): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 194 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@a039f09): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 195 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 56783 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1393 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47396ff Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 38709): State: TIMED_WAITING Blocked count: 122 Waited count: 2549 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 38709): State: TIMED_WAITING Blocked count: 121 Waited count: 2554 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 38709): State: TIMED_WAITING Blocked count: 73 Waited count: 2560 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 38709): State: TIMED_WAITING Blocked count: 99 Waited count: 2555 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 38709): State: TIMED_WAITING Blocked count: 95 Waited count: 2561 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2b9fd904): State: TIMED_WAITING Blocked count: 0 Waited count: 291 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@365338c3): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@5be1a34a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@22ba5cc1): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1987425868)): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1763236781-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1763236781-88-acceptor-0@28c57ec0-ServerConnector@4b4937c3{HTTP/1.1, (http/1.1)}{localhost:45121}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1763236781-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1763236781-90): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-3fea6ec7-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@e26d63): State: TIMED_WAITING Blocked count: 0 Waited count: 1162 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 36193): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 290 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b68bd3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 1426 Waited count: 1643 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@781af2e0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 614 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 581 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 586 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 605 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 583 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp360438187-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp360438187-122-acceptor-0@46f20d0c-ServerConnector@17bf7c8b{HTTP/1.1, (http/1.1)}{localhost:33277}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp360438187-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 113 (IPC Client (1422999152) connection to localhost/127.0.0.1:38709 from jenkins): State: TIMED_WAITING Blocked count: 1563 Waited count: 1564 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 124 (qtp360438187-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 0 Waited count: 2148 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-bfbca57-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6e7c0da6): State: TIMED_WAITING Blocked count: 0 Waited count: 1161 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44575): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 322 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@99154bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 1449 Waited count: 1654 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3dc3e52b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 583 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 583 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 583 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 582 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 582 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp767364239-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp767364239-154-acceptor-0@e36c42f-ServerConnector@644aedae{HTTP/1.1, (http/1.1)}{localhost:41355}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp767364239-155): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp767364239-156): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-902628f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1f93de06): State: TIMED_WAITING Blocked count: 0 Waited count: 1160 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 40185): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 339 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18da6b11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 1416 Waited count: 1645 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2c0c9066): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 591 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 583 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 582 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 583 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data1)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data2)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 191 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data2/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data1/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 195 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 203 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data3/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (java.util.concurrent.ThreadPoolExecutor$Worker@13cf618e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data4/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@677b2ec3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data5/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data6/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 233 (java.util.concurrent.ThreadPoolExecutor$Worker@42b019c5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 
(FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62515): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 290 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 20 Waited count: 732 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6134f26a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:62515):): State: WAITING Blocked count: 1 Waited count: 852 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41cebb9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 875 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a9edb39 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37ec9f05 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 400 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 20 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:62515)): State: RUNNABLE Blocked count: 49 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 56 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f91b45a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 33 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@226e9b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 134 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 
(NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@51614a46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 276 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883): State: WAITING Blocked count: 193 Waited count: 811 Waiting on java.util.concurrent.Semaphore$NonfairSync@5ff22cb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883): State: WAITING Blocked count: 122 Waited count: 472 Waiting on java.util.concurrent.Semaphore$NonfairSync@175e754a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45883): State: WAITING Blocked count: 44 Waited count: 9172 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22006578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79683804 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79683804 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@57db498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@61ed6558 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4067e7b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@78774fa8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 288 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 308 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 328 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 39 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;7a6e4bf70377:45883): State: TIMED_WAITING Blocked count: 6 Waited count: 3354 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$948/0x00007f17a8f1a6b8.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 349 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 351 (master/7a6e4bf70377:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/7a6e4bf70377:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (org.apache.hadoop.hdfs.PeerCache@5d0ed6dc): State: TIMED_WAITING Blocked count: 0 Waited count: 192 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 374 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5738 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 391 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 17 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 392 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 38 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 65 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27e6fa73 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57324 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 25 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 37 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 449 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46cd7caf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (regionserver/7a6e4bf70377:0.procedureResultReporter): State: WAITING Blocked count: 8 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@546aaa7c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/7a6e4bf70377:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44cfe7e0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 (regionserver/7a6e4bf70377:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f32c6be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 513 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (region-location-0): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 553 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 56946 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 566 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 20 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 603 (region-location-1): State: WAITING Blocked count: 5 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 604 (region-location-2): State: WAITING Blocked count: 5 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 605 (region-location-3): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1011 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 447 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1075 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1114 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1119 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 62 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17aad65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1124 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1571 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@3c8f02ce Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2263 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2565 (region-location-4): State: WAITING Blocked count: 2 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5385 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5386 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5387 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9826 (AsyncFSWAL-1-hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData-prefix:7a6e4bf70377,45883,1734378235170): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@439b6ad8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 9828 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 24
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-12-16T19:53:54,259 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-16T19:53:57,330 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=22, reuseRatio=68.75%
2024-12-16T19:53:57,334 DEBUG [master/7a6e4bf70377:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0
2024-12-16T19:54:06,831 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-16T19:54:24,259 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-16T19:54:33,303 DEBUG [M:0;7a6e4bf70377:45883 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-12-16T19:54:33,303 WARN [M:0;7a6e4bf70377:45883 {}] region.MasterRegion(134): Failed to close region
org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3730, WAL system stuck?
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:883) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3730, WAL system stuck?
    at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) ~[classes/:?]
    ... 20 more
2024-12-16T19:54:33,305 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(163): normal close failed, try recover
java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:396) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:243) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:201) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:236) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:160) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
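The WALSyncTimeoutIOException above is a "block on an asynchronous sync with a deadline" pattern: the close path waits on a sync future for up to 300000 ms and converts a timeout into an IOException. Below is a minimal illustrative sketch of that pattern only, using java.util.concurrent.CompletableFuture as a stand-in; the class and method names (SyncTimeoutSketch, waitForSync) are hypothetical and not HBase's SyncFuture/AbstractFSWAL API.

    import java.io.IOException;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;

    public class SyncTimeoutSketch {
      // 300000 ms matches the timeout reported in the exception above.
      static final long SYNC_TIMEOUT_MS = 300_000L;

      // Block until an asynchronous sync completes, converting a timeout into an
      // IOException -- the same shape as "Failed to get sync result after 300000 ms".
      static void waitForSync(CompletableFuture<Long> syncFuture, long txid) throws IOException {
        try {
          syncFuture.get(SYNC_TIMEOUT_MS, TimeUnit.MILLISECONDS);
        } catch (TimeoutException e) {
          throw new IOException(
              "Failed to get sync result after " + SYNC_TIMEOUT_MS + " ms for txid=" + txid, e);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw new IOException("Interrupted while waiting on sync for txid=" + txid, e);
        } catch (ExecutionException e) {
          throw new IOException("Sync failed for txid=" + txid, e.getCause());
        }
      }
    }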
2024-12-16T19:54:33,308 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils
2024-12-16T19:54:33,308 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease()
2024-12-16T19:54:33,308 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData/WALs/7a6e4bf70377,45883,1734378235170/7a6e4bf70377%2C45883%2C1734378235170.1734378238313
2024-12-16T19:54:33,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData/WALs/7a6e4bf70377,45883,1734378235170/7a6e4bf70377%2C45883%2C1734378235170.1734378238313 after 1ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T19:54:33,310 WARN [Close-WAL-Writer-0 {}] wal.AsyncFSWAL(734): close old writer failed.
java.io.InterruptedIOException: Operation cancelled
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-16T19:54:33,310 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData/WALs/7a6e4bf70377,45883,1734378235170/7a6e4bf70377%2C45883%2C1734378235170.1734378238313
2024-12-16T19:54:33,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData/WALs/7a6e4bf70377,45883,1734378235170/7a6e4bf70377%2C45883%2C1734378235170.1734378238313 after 0ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
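The RecoverLeaseFSUtils entries above show the cleanup path: after the normal close fails, the WAL writer asks HDFS to recover the lease on the old WAL file, retrying until recoverLease() reports the file closed; every attempt here fails with "Filesystem closed" because the DFSClient has already been shut down. Below is a minimal sketch of such a retry loop against the public DistributedFileSystem.recoverLease(Path) API; the one-second pause, the time budget, and the class/method names are assumptions for illustration, not HBase's actual retry policy.

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
      // Ask the NameNode to recover the lease on a file until it reports the file closed
      // or the time budget runs out. recoverLease() returns true once the lease has been
      // released; if the client has already been shut down it throws
      // IOException("Filesystem closed"), which is the failure captured in the log above.
      static boolean recoverLeaseWithRetries(DistributedFileSystem dfs, Path file, long budgetMs)
          throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + budgetMs;
        boolean recovered = dfs.recoverLease(file); // attempt=0
        while (!recovered && System.currentTimeMillis() < deadline) {
          Thread.sleep(1000L); // hypothetical pause between attempts
          recovered = dfs.recoverLease(file);
        }
        return recovered;
      }
    }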
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;7a6e4bf70377:45883 221 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 2 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 17 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@3e41e187 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 12 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 31 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@536e0147 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6455 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 65 Waiting on java.util.concurrent.CountDownLatch$Sync@6a23368e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12657 Waited count: 13315 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 11 Waited count: 12 Waiting on java.lang.ref.ReferenceQueue$Lock@31a60107 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@12614a4d Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@20ad794b): State: TIMED_WAITING Blocked count: 0 Waited count: 1285 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1056518703-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1056518703-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1056518703-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1056518703-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1056518703-41-acceptor-0@72a1dc6d-ServerConnector@69dc1403{HTTP/1.1, (http/1.1)}{localhost:34747}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1056518703-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1056518703-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1056518703-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5c96b88a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 27 Waited count: 3222 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74f576f1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 38709): State: TIMED_WAITING Blocked count: 1 Waited 
count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@2fb21527): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 214 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@a039f09): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 215 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 62726 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1393 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@47396ff Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 38709): State: TIMED_WAITING Blocked count: 122 Waited count: 2610 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 38709): State: TIMED_WAITING Blocked count: 121 Waited count: 2615 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 38709): State: TIMED_WAITING Blocked count: 73 Waited count: 2621 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 38709): State: TIMED_WAITING Blocked count: 99 Waited count: 2616 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 38709): State: TIMED_WAITING Blocked count: 95 Waited count: 2621 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@2b9fd904): State: TIMED_WAITING Blocked count: 0 Waited count: 321 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@365338c3): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@5be1a34a): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@22ba5cc1): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1987425868)): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1763236781-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1763236781-88-acceptor-0@28c57ec0-ServerConnector@4b4937c3{HTTP/1.1, (http/1.1)}{localhost:45121}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1763236781-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp1763236781-90): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-3fea6ec7-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@e26d63): State: TIMED_WAITING Blocked count: 0 Waited count: 1282 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 36193): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 1 Waited count: 310 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6b68bd3f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 1446 Waited count: 1683 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@781af2e0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 680 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 641 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 646 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 668 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 36193): State: TIMED_WAITING Blocked count: 0 Waited count: 643 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp360438187-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp360438187-122-acceptor-0@46f20d0c-ServerConnector@17bf7c8b{HTTP/1.1, (http/1.1)}{localhost:33277}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp360438187-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 113 (IPC Client (1422999152) connection to localhost/127.0.0.1:38709 from jenkins): State: TIMED_WAITING Blocked count: 1623 Waited count: 1624 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 124 (qtp360438187-124): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 0 Waited count: 2208 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-bfbca57-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6e7c0da6): State: TIMED_WAITING Blocked count: 0 Waited count: 1281 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44575): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 342 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@99154bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 1469 Waited count: 1694 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3dc3e52b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 643 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 643 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 643 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 642 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 44575): State: TIMED_WAITING Blocked count: 0 Waited count: 642 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp767364239-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$253/0x00007f17a8428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp767364239-154-acceptor-0@e36c42f-ServerConnector@644aedae{HTTP/1.1, (http/1.1)}{localhost:41355}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp767364239-155): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp767364239-156): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-902628f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@1f93de06): State: TIMED_WAITING Blocked count: 0 Waited count: 1280 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 40185): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 0 Waited count: 359 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@18da6b11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709): State: TIMED_WAITING Blocked count: 1436 Waited count: 1685 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@2c0c9066): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 651 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 643 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 648 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 642 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 40185): State: TIMED_WAITING Blocked count: 0 Waited count: 643 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data1)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data2)): State: TIMED_WAITING Blocked count: 3 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 191 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data2/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 192 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data1/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 195 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 203 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@9b75864 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 205 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data3/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (java.util.concurrent.ThreadPoolExecutor$Worker@13cf618e[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data4/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 214 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3527484d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 218 (java.util.concurrent.ThreadPoolExecutor$Worker@677b2ec3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 220 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data5/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 226 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data6/current/BP-1238680177-172.17.0.2-1734378229927): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 230 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3523db09 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
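The entries immediately above and below share one idle-worker signature: jdk.internal.misc.Unsafe.park -> LockSupport.park/parkNanos -> AbstractQueuedSynchronizer$ConditionObject.await/awaitNanos -> ScheduledThreadPoolExecutor$DelayedWorkQueue.take (or LinkedBlockingQueue.take) -> ThreadPoolExecutor.getTask -> runWorker. Threads parked in that shape are pool workers waiting for their next task, not threads that are stuck. The per-thread fields in this dump (State, Blocked count, Waited count, the optional "Waiting on", Stack) match what java.lang.management.ThreadInfo exposes, so the report was most likely produced by a ThreadMXBean-based dumper. Below is a minimal sketch of such a dumper, assuming only the standard java.lang.management API; it is an illustration in the same output shape, not the exact reporter that generated this log.

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;
    import java.lang.management.ThreadMXBean;

    public class ThreadDumpSketch {
        public static void main(String[] args) {
            ThreadMXBean mx = ManagementFactory.getThreadMXBean();
            // Snapshot every live thread; locked monitors/synchronizers are not requested here.
            for (ThreadInfo info : mx.dumpAllThreads(false, false)) {
                System.out.println("Thread " + info.getThreadId() + " (" + info.getThreadName() + "):");
                System.out.println("  State: " + info.getThreadState());
                System.out.println("  Blocked count: " + info.getBlockedCount());
                System.out.println("  Waited count: " + info.getWaitedCount());
                if (info.getLockName() != null) {
                    // Mirrors the "Waiting on <lock class>@<hash>" lines in the dump above.
                    System.out.println("  Waiting on " + info.getLockName());
                }
                System.out.println("  Stack:");
                for (StackTraceElement frame : info.getStackTrace()) {
                    System.out.println("    " + frame);
                }
            }
        }
    }

When scanning a dump like this for the cause of a hang or timeout, the idle executor and selector patterns can generally be skipped; the threads worth reading closely are the RUNNABLE ones doing real work and any thread parked on an application-level lock or future rather than on an executor's task queue.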
Thread 233 (java.util.concurrent.ThreadPoolExecutor$Worker@42b019c5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 234 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 236 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 237 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 238 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:62515): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 235 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 239 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 320 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 240 (SyncThread:0): State: WAITING Blocked count: 20 Waited count: 737 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6134f26a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 241 (ProcessThread(sid:0 cport:62515):): State: WAITING Blocked count: 1 Waited count: 857 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@41cebb9a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 242 (RequestThrottler): State: WAITING Blocked count: 1 Waited count: 880 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6a9edb39 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 243 (NIOWorkerThread-1): State: WAITING Blocked count: 1 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 254 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37ec9f05 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 255 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 430 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 20 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (Time-limited test-SendThread(127.0.0.1:62515)): State: RUNNABLE Blocked count: 49 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 258 (Time-limited test-EventThread): State: WAITING Blocked count: 3 Waited count: 56 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f91b45a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 259 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (zk-event-processor-pool-0): State: WAITING Blocked count: 33 Waited count: 87 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@226e9b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-5): State: WAITING Blocked count: 2 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-6): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-9): State: WAITING Blocked count: 4 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-10): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 134 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-15): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@710d94ec Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 
(RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@51614a46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 276 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45883): State: WAITING Blocked count: 193 Waited count: 811 Waiting on java.util.concurrent.Semaphore$NonfairSync@5ff22cb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45883): State: WAITING Blocked count: 122 Waited count: 472 Waiting on java.util.concurrent.Semaphore$NonfairSync@175e754a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45883): State: WAITING Blocked count: 44 Waited count: 9172 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@22006578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 
(RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79683804 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@79683804 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@57db498 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@61ed6558 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@4067e7b8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=45883): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@78774fa8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 288 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 308 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 328 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 39 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 286 (M:0;7a6e4bf70377:45883): State: TIMED_WAITING Blocked count: 6 Waited count: 3355 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1011) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown(AbstractFSWALProvider.java:184) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:272) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) app//org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 349 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 351 (master/7a6e4bf70377:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (master/7a6e4bf70377:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (org.apache.hadoop.hdfs.PeerCache@5d0ed6dc): State: TIMED_WAITING Blocked count: 0 Waited count: 212 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 374 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6337 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 391 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 17 Waited count: 5 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 392 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 38 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 65 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@27e6fa73 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 63326 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 25 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 37 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 449 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@46cd7caf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (regionserver/7a6e4bf70377:0.procedureResultReporter): State: WAITING Blocked count: 8 Waited count: 17 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@546aaa7c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 475 (regionserver/7a6e4bf70377:0.procedureResultReporter): State: WAITING Blocked count: 18 Waited count: 37 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@44cfe7e0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 (regionserver/7a6e4bf70377:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f32c6be Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 513 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 525 
(RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 527 (region-location-0): State: WAITING Blocked count: 8 Waited count: 15 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 553 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 62948 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 566 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 20 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 603 (region-location-1): State: WAITING Blocked count: 5 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 604 (region-location-2): State: WAITING Blocked count: 5 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 605 (region-location-3): State: WAITING Blocked count: 4 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1011 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 453 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1075 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 23 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1114 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1119 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 62 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@17aad65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1124 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1571 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@3c8f02ce Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2263 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2565 (region-location-4): State: WAITING Blocked count: 2 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2b7eff3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5385 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5386 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5387 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9826 (AsyncFSWAL-1-hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData-prefix:7a6e4bf70377,45883,1734378235170): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@439b6ad8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9829 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9832 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 9837 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doShutdown(AsyncFSWAL.java:793) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:995) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:990) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 9838 (Close-WAL-Writer-0): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) 
app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL$$Lambda$1121/0x00007f17a9175798.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-16T19:54:37,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData/WALs/7a6e4bf70377,45883,1734378235170/7a6e4bf70377%2C45883%2C1734378235170.1734378238313 after 4001ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-16T19:54:38,305 ERROR [WAL-Shutdown-0 {}] wal.AsyncFSWAL(794): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds" 2024-12-16T19:54:38,305 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-16T19:54:38,305 INFO [M:0;7a6e4bf70377:45883 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 
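Note on the ERROR at 19:54:38,305: it names the config key "hbase.wal.async.wait.on.shutdown.seconds", which bounds how long the async WAL writer is given to close during shutdown (5 seconds in this run). Below is a minimal Java sketch of how a mini-cluster test could raise that wait; the class name and the value 30 are illustrative assumptions, not something taken from this run, and a longer wait would likely not rescue this particular failure, where the close is dying because the DFS client is already shut down ("Filesystem closed").

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HBaseTestingUtility;

public class WalShutdownWaitSketch {
  public static void main(String[] args) throws Exception {
    // Key name comes from the ERROR above; the value 30 is an arbitrary assumption.
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 30);

    // Start and stop a mini cluster the way this test run does, but with the
    // longer WAL shutdown wait in effect.
    HBaseTestingUtility util = new HBaseTestingUtility(conf);
    util.startMiniCluster();
    try {
      // ... test body ...
    } finally {
      util.shutdownMiniCluster();  // master WAL close now has up to 30s before the warning fires
    }
  }
}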
2024-12-16T19:54:38,306 INFO [M:0;7a6e4bf70377:45883 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:45883 2024-12-16T19:54:38,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:38709/user/jenkins/test-data/2169dc9f-8777-6451-483f-054f4973cf7c/MasterData/WALs/7a6e4bf70377,45883,1734378235170/7a6e4bf70377%2C45883%2C1734378235170.1734378238313 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 12 more 2024-12-16T19:54:38,356 DEBUG [M:0;7a6e4bf70377:45883 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/7a6e4bf70377,45883,1734378235170 already deleted, retry=false 2024-12-16T19:54:38,464 INFO [M:0;7a6e4bf70377:45883 {}] regionserver.HRegionServer(1307): Exiting; stopping=7a6e4bf70377,45883,1734378235170; zookeeper connection closed. 
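The two Close-WAL-Writer-0 warnings above show RecoverLeaseFSUtils reflectively invoking DistributedFileSystem.recoverLease and then isFileClosed on the master WAL, with both attempts failing in DFSClient.checkOpen because the filesystem has already been closed. As a rough sketch only (not the HBase utility itself), the direct, non-reflective equivalent of those two probes looks like this; the URI and path are placeholders, not the actual WAL file from this log:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path wal = new Path("/path/to/MasterData/WALs/some-wal-file");  // placeholder

      // These are the two methods the stack frames above reach via reflection.
      boolean recovered = dfs.recoverLease(wal);
      if (!recovered) {
        recovered = dfs.isFileClosed(wal);
      }
      System.out.println("lease recovered: " + recovered);
      // Both calls pass through DFSClient.checkOpen(), so once the client is shut
      // down they throw IOException("Filesystem closed"), matching the failures
      // logged at 19:54:37,311 and 19:54:38,313.
    }
  }
}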
2024-12-16T19:54:38,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T19:54:38,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45883-0x100e481a0950000, quorum=127.0.0.1:62515, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-16T19:54:38,506 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@52f77f0c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-16T19:54:38,506 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@644aedae{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T19:54:38,506 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T19:54:38,506 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@622b2501{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-16T19:54:38,506 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13ce8b71{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.log.dir/,STOPPED} 2024-12-16T19:54:38,510 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-16T19:54:38,510 WARN [BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-16T19:54:38,510 WARN [BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1238680177-172.17.0.2-1734378229927 (Datanode Uuid 65a82667-85a4-4311-882b-d13d249bdfc6) service to localhost/127.0.0.1:38709 2024-12-16T19:54:38,510 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-16T19:54:38,512 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data5/current/BP-1238680177-172.17.0.2-1734378229927 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-16T19:54:38,512 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data6/current/BP-1238680177-172.17.0.2-1734378229927 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-16T19:54:38,513 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-16T19:54:38,515 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b96c006{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-16T19:54:38,515 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17bf7c8b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T19:54:38,515 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T19:54:38,515 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4b9c8e49{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-16T19:54:38,515 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b9deb82{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.log.dir/,STOPPED} 2024-12-16T19:54:38,517 WARN [BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-16T19:54:38,517 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-16T19:54:38,517 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-16T19:54:38,517 WARN [BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1238680177-172.17.0.2-1734378229927 (Datanode Uuid 5b533e50-8d8a-4498-97b8-acb11831b9a2) service to localhost/127.0.0.1:38709 2024-12-16T19:54:38,518 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data3/current/BP-1238680177-172.17.0.2-1734378229927 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-16T19:54:38,518 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data4/current/BP-1238680177-172.17.0.2-1734378229927 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-16T19:54:38,518 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-16T19:54:38,520 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@57cef19b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-16T19:54:38,521 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4b4937c3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T19:54:38,521 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T19:54:38,521 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d0a94c7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-16T19:54:38,521 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a0dab5a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.log.dir/,STOPPED} 2024-12-16T19:54:38,522 WARN [BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-16T19:54:38,522 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-16T19:54:38,523 WARN [BP-1238680177-172.17.0.2-1734378229927 heartbeating to localhost/127.0.0.1:38709 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1238680177-172.17.0.2-1734378229927 (Datanode Uuid 2888fb3c-2097-4f16-bd71-ae02aa7cea00) service to localhost/127.0.0.1:38709 2024-12-16T19:54:38,523 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-16T19:54:38,523 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data1/current/BP-1238680177-172.17.0.2-1734378229927 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-16T19:54:38,524 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/cluster_a20c26a9-923e-ea35-1e67-28edd70bc4b4/dfs/data/data2/current/BP-1238680177-172.17.0.2-1734378229927 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-16T19:54:38,524 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-16T19:54:38,530 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5da2d515{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-16T19:54:38,531 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69dc1403{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-16T19:54:38,531 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-16T19:54:38,531 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70357eda{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-16T19:54:38,531 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@744df411{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/bf749f05-aa1a-be20-dfc2-f327a1bbde60/hadoop.log.dir/,STOPPED} 2024-12-16T19:54:38,544 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-16T19:54:38,868 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
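For reference, the long per-thread listing earlier in this excerpt (Thread 280 through Close-WAL-Writer-0, each with State, Blocked count, Waited count, the monitor being waited on, and a stack) can be approximated with plain JDK management APIs. The sketch below is not the utility this test harness actually used; it only shows where each reported field comes from:

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public class ThreadDumpSketch {
  public static void main(String[] args) {
    ThreadMXBean mx = ManagementFactory.getThreadMXBean();
    // Dump all live threads with full stacks, including held monitors/synchronizers.
    for (ThreadInfo info : mx.dumpAllThreads(true, true)) {
      System.out.printf("Thread %d (%s):%n", info.getThreadId(), info.getThreadName());
      System.out.printf("  State: %s%n", info.getThreadState());
      System.out.printf("  Blocked count: %d%n", info.getBlockedCount());
      System.out.printf("  Waited count: %d%n", info.getWaitedCount());
      if (info.getLockName() != null) {
        System.out.printf("  Waiting on %s%n", info.getLockName());
      }
      System.out.println("  Stack:");
      for (StackTraceElement frame : info.getStackTrace()) {
        System.out.println("    " + frame);
      }
    }
  }
}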